
Source Code for Module lepl.lexer.lines._test.word_bug

# The contents of this file are subject to the Mozilla Public License
# (MPL) Version 1.1 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License
# at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and
# limitations under the License.
#
# The Original Code is LEPL (http://www.acooke.org/lepl)
# The Initial Developer of the Original Code is Andrew Cooke.
# Portions created by the Initial Developer are Copyright (C) 2009-2010
# Andrew Cooke (andrew@acooke.org). All Rights Reserved.
#
# Alternatively, the contents of this file may be used under the terms
# of the LGPL license (the GNU Lesser General Public License,
# http://www.gnu.org/licenses/lgpl.html), in which case the provisions
# of the LGPL License are applicable instead of those above.
#
# If you wish to allow use of your version of this file only under the
# terms of the LGPL License and not to allow others to use your version
# of this file under the MPL, indicate your decision by deleting the
# provisions above and replace them with the notice and other provisions
# required by the LGPL License.  If you do not delete the provisions
# above, a recipient may use your version of this file under either the
# MPL or the LGPL License.

'''
Tests related to a bug when Word() was specified inside Token() with
line-aware parsing.
'''

from unittest import TestCase

from lepl import *
from lepl.regexp.str import make_str_parser

class WordBugTest(TestCase):

    def test_simple(self):
        with DroppedSpace():
            line = (Word()[:] & Drop('\n')) > list
            lines = line[:]
        result = lines.parse('abc de f\n pqr\n')
        assert result == [['abc', 'de', 'f'], ['pqr']], result

    def test_tokens(self):
        word = Token(Word())
        newline = ~Token('\n')
        line = (word[:] & newline) > list
        lines = line[:]
        result = lines.parse('abc de f\n pqr\n')
        assert result == [['abc', 'de', 'f'], ['pqr']], result

    def test_line_any(self):
        word = Token('[a-z]+')
        line = Line(word[:]) > list
        lines = line[:]
        lines.config.lines()
        result = lines.parse('abc de f\n pqr\n')
        assert result == [['abc', 'de', 'f'], ['pqr']], result

    def test_line_word(self):
        word = Token(Word())
        line = Line(word[:]) > list
        lines = line[:]
        lines.config.lines()
        result = lines.parse('abc de f\n pqr\n')
        assert result == [['abc', 'de', 'f'], ['pqr']], result

    def test_line_notnewline(self):
        word = Token('[^\n ]+')
        line = Line(word[:]) > list
        lines = line[:]
        lines.config.lines()
        result = lines.parse('abc de f\n pqr\n')
        assert result == [['abc', 'de', 'f'], ['pqr']], result

    def test_line_word_explicit(self):
        word = Token(Word())
        line = (LineStart() & word[:] & LineEnd()) > list
        lines = line[:]
        lines.config.lines()
        result = lines.parse('abc de f\n pqr\n')
        assert result == [['abc', 'de', 'f'], ['pqr']], result
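
These are ordinary unittest.TestCase methods, so the module can be exercised with the standard library's test machinery. A minimal sketch of running it directly, not part of the original source and assuming LEPL (including its _test packages) is installed and importable:

    # Load WordBugTest from this module by dotted name and run it with the
    # standard library's text runner.
    import unittest

    suite = unittest.defaultTestLoader.loadTestsFromName(
        'lepl.lexer.lines._test.word_bug')
    unittest.TextTestRunner(verbosity=2).run(suite)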