Merge pull request #7400 from catalin-burcea/BAEL-1953
[BAEL 1953] - added StreamTokenizer examples
This commit is contained in:
		
						commit
						192774ef2c
					
				| @ -0,0 +1,76 @@ | ||||
| package com.baeldung.string.streamtokenizer; | ||||
| 
 | ||||
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
| 
 | ||||
public class StreamTokenizerDemo {

    private static final String INPUT_FILE = "/stream-tokenizer-example.txt";
    private static final int QUOTE_CHARACTER = '\'';
    private static final int DOUBLE_QUOTE_CHARACTER = '"';

    /**
     * Tokenizes the given input with {@link StreamTokenizer}'s default syntax table
     * (numbers parsed, letters are word characters, single/double quotes delimit
     * quoted strings, "//" starts a comment).
     *
     * @param reader source of characters to tokenize; not closed by this method
     * @return tokens in order: {@code Double} for numbers, {@code String} for words
     *         and quoted strings, {@code Character} for ordinary characters
     * @throws IOException if reading from {@code reader} fails
     */
    public static List<Object> streamTokenizerWithDefaultConfiguration(Reader reader) throws IOException {
        return collectTokens(new StreamTokenizer(reader));
    }

    /**
     * Tokenizes the given input with a customized syntax table: characters
     * {@code '!'..'-'} become word characters, {@code '/'} loses its default
     * comment-start role, {@code '#'} starts a comment, and end-of-line is
     * reported as a token.
     *
     * @param reader source of characters to tokenize; not closed by this method
     * @return tokens in order (see {@link #streamTokenizerWithDefaultConfiguration})
     * @throws IOException if reading from {@code reader} fails
     */
    public static List<Object> streamTokenizerWithCustomConfiguration(Reader reader) throws IOException {
        StreamTokenizer streamTokenizer = new StreamTokenizer(reader);

        // Order matters: commentChar('#') must come after wordChars('!', '-')
        // because '#' (35) lies inside that range and would otherwise stay a word char.
        streamTokenizer.wordChars('!', '-');
        streamTokenizer.ordinaryChar('/');
        streamTokenizer.commentChar('#');
        streamTokenizer.eolIsSignificant(true);

        return collectTokens(streamTokenizer);
    }

    /**
     * Drains the tokenizer to EOF, mapping each token to a Java value:
     * numbers to {@code Double}, words and quoted strings to {@code String},
     * anything else to {@code Character}.
     */
    private static List<Object> collectTokens(StreamTokenizer streamTokenizer) throws IOException {
        List<Object> tokens = new ArrayList<>();

        int currentToken = streamTokenizer.nextToken();
        while (currentToken != StreamTokenizer.TT_EOF) {

            if (streamTokenizer.ttype == StreamTokenizer.TT_NUMBER) {
                tokens.add(streamTokenizer.nval);
            } else if (streamTokenizer.ttype == StreamTokenizer.TT_WORD
                    || streamTokenizer.ttype == QUOTE_CHARACTER
                    || streamTokenizer.ttype == DOUBLE_QUOTE_CHARACTER) {
                tokens.add(streamTokenizer.sval);
            } else {
                // Ordinary character (including TT_EOL when eolIsSignificant is on):
                // ttype is the character value itself.
                tokens.add((char) currentToken);
            }

            currentToken = streamTokenizer.nextToken();
        }

        return tokens;
    }

    /**
     * Opens the bundled example file from the classpath.
     *
     * <p>Uses {@code getResourceAsStream} with an explicit charset instead of
     * {@code getResource().getFile()} + {@code FileReader}: the latter breaks when
     * the classpath contains spaces (URL-encoded paths) and silently depends on
     * the platform default charset.
     *
     * @return a fresh reader over the example file; caller is responsible for closing it
     * @throws FileNotFoundException if the resource is missing from the classpath
     */
    public static Reader createReaderFromFile() throws FileNotFoundException {
        InputStream inputStream = StreamTokenizerDemo.class.getResourceAsStream(INPUT_FILE);
        if (inputStream == null) {
            throw new FileNotFoundException("Resource not found on classpath: " + INPUT_FILE);
        }
        return new InputStreamReader(inputStream, StandardCharsets.UTF_8);
    }

    public static void main(String[] args) throws IOException {
        // try-with-resources: the original leaked both readers.
        try (Reader defaultReader = createReaderFromFile();
             Reader customReader = createReaderFromFile()) {
            System.out.println(streamTokenizerWithDefaultConfiguration(defaultReader));
            System.out.println(streamTokenizerWithCustomConfiguration(customReader));
        }
    }

}
| @ -0,0 +1,3 @@ | ||||
| 3 quick brown foxes jump over the "lazy" dog! | ||||
| #test1 | ||||
| //test2 | ||||
| @ -0,0 +1,34 @@ | ||||
| package com.baeldung.string.streamtokenizer; | ||||
| 
 | ||||
| import org.junit.Test; | ||||
| 
 | ||||
| import java.io.IOException; | ||||
| import java.io.Reader; | ||||
| import java.util.Arrays; | ||||
| import java.util.List; | ||||
| 
 | ||||
| import static org.junit.Assert.assertArrayEquals; | ||||
| 
 | ||||
| public class StreamTokenizerDemoUnitTest { | ||||
| 
 | ||||
|     @Test | ||||
|     public void whenStreamTokenizerWithDefaultConfigurationIsCalled_ThenCorrectTokensAreReturned() throws IOException { | ||||
|         Reader reader = StreamTokenizerDemo.createReaderFromFile(); | ||||
|         List<Object> expectedTokens = Arrays.asList(3.0, "quick", "brown", "foxes", "jump", "over", "the", "lazy", "dog", '!', '#', "test1"); | ||||
| 
 | ||||
|         List<Object> actualTokens = StreamTokenizerDemo.streamTokenizerWithDefaultConfiguration(reader); | ||||
| 
 | ||||
|         assertArrayEquals(expectedTokens.toArray(), actualTokens.toArray()); | ||||
|     } | ||||
| 
 | ||||
|     @Test | ||||
|     public void whenStreamTokenizerWithCustomConfigurationIsCalled_ThenCorrectTokensAreReturned() throws IOException { | ||||
|         Reader reader = StreamTokenizerDemo.createReaderFromFile(); | ||||
|         List<Object> expectedTokens = Arrays.asList(3.0, "quick", "brown", "foxes", "jump", "over", "the", "\"lazy\"", "dog!", '\n', '\n', '/', '/', "test2"); | ||||
| 
 | ||||
|         List<Object> actualTokens = StreamTokenizerDemo.streamTokenizerWithCustomConfiguration(reader); | ||||
| 
 | ||||
|         assertArrayEquals(expectedTokens.toArray(), actualTokens.toArray()); | ||||
|     } | ||||
| 
 | ||||
| } | ||||
		Loading…
	
	
			
			x
			
			
		
	
		Reference in New Issue
	
	Block a user