From 2f8325e2774797dda63285de43abcff998cf371e Mon Sep 17 00:00:00 2001
From: guido
Date: Tue, 30 Jan 2007 15:46:57 +0100
Subject: [PATCH] [svn r37602] Fixed support for files ending on a comment
 rather than a newline, fixed some unicode() calls so they're not done on
 objects that are already unicode.

--HG--
branch : trunk
---
 py/apigen/source/color.py              | 3 +--
 py/apigen/source/html.py               | 5 ++++-
 py/apigen/source/testing/test_color.py | 4 ++++
 3 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/py/apigen/source/color.py b/py/apigen/source/color.py
index 63a1c5762..eb3013852 100644
--- a/py/apigen/source/color.py
+++ b/py/apigen/source/color.py
@@ -4,7 +4,7 @@ import re
 
 class PythonSchema(object):
     """ contains information for syntax coloring """
-    comment = [('#', '\n')]
+    comment = [('#', '\n'), ('#', '$')]
     multiline_string = ['"""', "'''"]
     string = ['"""', "'''", '"', "'"]
     # XXX not complete
@@ -125,7 +125,6 @@ class Tokenizer(object):
                 break
         return data, token
 
-
     def _check_comments(self, data):
         # fortunately we don't have to deal with multi-line comments
         token = None
diff --git a/py/apigen/source/html.py b/py/apigen/source/html.py
index 79d34e092..6fd39b16f 100644
--- a/py/apigen/source/html.py
+++ b/py/apigen/source/html.py
@@ -46,7 +46,10 @@ def prepare_line(text, tokenizer, encoding):
         if type(item) in [str, unicode]:
             tokens = tokenizer.tokenize(item)
             for t in tokens:
-                data = unicode(t.data, encoding)
+                if not isinstance(t.data, unicode):
+                    data = unicode(t.data, encoding)
+                else:
+                    data = t.data
                 if t.type in ['keyword', 'alt_keyword', 'number',
                               'string', 'comment']:
                     ret.append(html.span(data, class_=t.type))
diff --git a/py/apigen/source/testing/test_color.py b/py/apigen/source/testing/test_color.py
index 7513fb6a4..08d0ade11 100644
--- a/py/apigen/source/testing/test_color.py
+++ b/py/apigen/source/testing/test_color.py
@@ -45,8 +45,12 @@ class TestTokenizer(object):
         assert self.tokens('foo # bar\n') == [Token('foo', type='word'),
                                               Token(' ', type='whitespace'),
                                               Token('# bar\n', type='comment')]
+        assert self.tokens("# foo 'bar\n") == [Token("# foo 'bar\n",
+                                                     type='comment')]
+        assert self.tokens('# foo') == [Token('# foo', type='comment')]
 
     def test_string_simple(self):
+        assert self.tokens('""') == [Token('""', type='string')]
         assert self.tokens('"foo"') == [Token('"foo"', type='string')]
         assert self.tokens('"foo"\'bar\'') == [Token('"foo"', type='string'),
                                                Token("'bar'", type='string')]
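
Note on the color.py change: the new ('#', '$') pair sits next to the existing
('#', '\n') terminator, which suggests the schema's (start, end) pairs are
matched roughly like regexes with '$' acting as an end-of-input anchor. Below
is a minimal sketch of that idea, not the library's actual Tokenizer; the
names comment_pairs and match_comment are hypothetical. It shows why the
extra pair lets a file that stops mid-comment still yield a comment token:

    import re

    # Assumed reduction of PythonSchema.comment matching: '\n' only ends a
    # comment that is actually followed by a newline, while '$' also accepts
    # the end of the input.
    comment_pairs = [('#', '\n'), ('#', '$')]

    def match_comment(data):
        for start, end in comment_pairs:
            # treat '$' as the regex end-of-string anchor; any other
            # terminator is matched literally and kept inside the token
            terminator = '$' if end == '$' else re.escape(end)
            m = re.match(re.escape(start) + '[^\n]*' + terminator, data)
            if m:
                return m.group(0)
        return None

    assert match_comment("# foo\nbar = 1") == "# foo\n"  # newline-terminated
    assert match_comment("# foo") == "# foo"             # file ends on comment

This mirrors the new test cases in test_color.py: tokens('# foo') now
produces a comment token even though there is no trailing newline.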
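
Note on the html.py change: in Python 2, unicode(x, encoding) raises
TypeError ("decoding Unicode is not supported") when x is already a unicode
object, so the decode has to be guarded. A Python 2 sketch of the pattern the
patch applies inline on t.data; ensure_unicode is a hypothetical helper name,
not something the patch introduces:

    # Python 2 only: 'unicode' is the built-in text type.
    def ensure_unicode(data, encoding):
        # only byte strings get decoded; unicode passes through untouched
        if not isinstance(data, unicode):
            data = unicode(data, encoding)
        return data

    assert ensure_unicode('caf\xc3\xa9', 'utf-8') == u'caf\xe9'  # str: decoded
    assert ensure_unicode(u'caf\xe9', 'utf-8') == u'caf\xe9'     # passed through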