diff --git a/tools/python/jwutils/grammar.py b/tools/python/jwutils/grammar.py
index 7e1387e..fc6ea11 100644
--- a/tools/python/jwutils/grammar.py
+++ b/tools/python/jwutils/grammar.py
@@ -92,8 +92,9 @@ def dump(obj):
         slog(INFO, "obj.%s = %s (=> %s)" % (str(type(c)), str(c), str(v)))
 
 def dump_grammar(prio, grammar):
+    caller = get_caller_pos()
     for t, p in grammar.iteritems():
-        p.dump(prio)
+        p.dump(prio, caller=caller)
 
 def cleanup_token(tok):
     tok = tok.strip()
@@ -179,6 +180,7 @@ class RuleComp:
         self.token = token
         self.tp = tp
         slog(INFO, "creating rule component >" + self.str() + "<")
+        assert(token != "{ assignment")
 
     def __eq__(self, rhs):
         if self.token != rhs.token:
@@ -288,23 +290,30 @@ class Symbol:
                 return False
         return True
 
-    def dump(self, prio = NOTICE, msg=""):
-        slog(prio, ",----------------", msg)
-        slog(prio, "| type =", self.tp)
-        slog(prio, "| name =", self.name)
-        slog(prio, "| token =", self.token)
-        slog(prio, "| sym =", self.sym)
-        slog(prio, "| term =", self.term)
-        slog(prio, "| regex =", self.regex)
-        slog(prio, "| datatype =", self.datatype)
-        slog(prio, "| is_lexical_element =", self.is_lexical_element)
-        slog(prio, "| rules =", format_rules(self.rules))
-        slog(prio, "`----------------", msg)
+    def dump(self, prio = NOTICE, msg="", caller=None):
+        if caller is None:
+            caller = get_caller_pos(1)
+        slog(prio, ",----------------", msg, caller=caller)
+        slog(prio, "| type =", self.tp, caller=caller)
+        slog(prio, "| name =", self.name, caller=caller)
+        slog(prio, "| token =", self.token, caller=caller)
+        slog(prio, "| sym =", self.sym, caller=caller)
+        slog(prio, "| term =", self.term, caller=caller)
+        slog(prio, "| regex =", self.regex, caller=caller)
+        slog(prio, "| datatype =", self.datatype, caller=caller)
+        slog(prio, "| is_lexical_element =", self.is_lexical_element, caller=caller)
+        slog(prio, "| rules =", format_rules(self.rules), caller=caller)
+        slog(prio, "`----------------", msg, caller=caller)
 
 def split_list_by(l_, tok):
     l = copy.deepcopy(l_)
     return [list(x[1]) for x in itertools.groupby(l, lambda x: x==tok) if not x[0]]
+
+def split_list_by_regex(l_, regex):
+    l = copy.deepcopy(l_)
+    return [list(x[1]) for x in itertools.groupby(l, lambda x: re.match(regex, x)) if not x[0]]
+
 def grammar_parse_ebnf(content_):
     # remove comments
@@ -343,8 +352,8 @@ def grammar_parse_ebnf(content_):
         raw_rules = split_list_by(raw_lhs_rhs[1], '|')
         #slog(INFO, "raw_lhs_rhs[1] = ", raw_lhs_rhs[1])
         for raw_rule in raw_rules:
-            #slog(INFO, "raw_rule =", raw_rule)
-            rule_tokens = split_list_by(raw_rule, ',')
+            slog(INFO, "raw_rule =", raw_rule)
+            rule_tokens = split_list_by_regex(raw_rule, '[,{}\(\)\[\]]')
             #slog(INFO, "rule_tokens =", rule_tokens)
             rule = []
             for raw_tok in rule_tokens:
@@ -363,7 +372,6 @@ def grammar_parse_ebnf(content_):
         slog(INFO, "Appending production>" + lhs + "< -> ", p.str())
         grammar[lhs] = p
 
-    dump_grammar(INFO, grammar)
     return grammar
 
 def grammar_get_types(grammar):
diff --git a/tools/python/jwutils/log.py b/tools/python/jwutils/log.py
index 83ee12d..9790689 100644
--- a/tools/python/jwutils/log.py
+++ b/tools/python/jwutils/log.py
@@ -72,7 +72,12 @@ def __pad(token, total_size, right_align = False):
         return space + token
     return token + space
 
-def slog(prio, *args): # export
+def get_caller_pos(up = 1):
+    assert(up == 1) # TODO: implement this
+    caller_frame = inspect.currentframe().f_back.f_back
+    return (basename(caller_frame.f_code.co_filename), caller_frame.f_lineno)
+
+def slog(prio, *args, **kwargs): # export
     if prio > level:
         return
 
@@ -86,10 +91,11 @@ def slog(prio, *args): # export
         msg += short_prio_str[prio] + ' '
 
     if f_position in flags:
-        caller_frame = inspect.currentframe().f_back
-        line = '[' + __pad(str(caller_frame.f_lineno), 4, True) + ']'
-        file = __pad(basename(caller_frame.f_code.co_filename), 20)
-        msg += file + line
+        if 'caller' in kwargs:
+            name, line = kwargs['caller']
+        else:
+            name, line = get_caller_pos(1)
+        msg += __pad(name, 20) + '[' + __pad(str(line), 4, True) + ']'
 
     if f_color in flags:
         color_on, color_off = prio_colors[prio]
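
Note: a minimal usage sketch of the new split_list_by_regex() helper with the delimiter
pattern written as a character class (the import path jwutils.grammar and the sample
token list below are illustrative assumptions, not part of the patch):

    # Illustrative only: assumes jwutils.grammar is importable from tools/python.
    from jwutils.grammar import split_list_by_regex

    tokens = ['expr', ',', 'term', '{', 'op', 'term', '}']
    # itertools.groupby() drops every token matching the delimiter class and keeps
    # the runs in between, so the rule body is split at ',', '{' and '}'.
    groups = split_list_by_regex(tokens, '[,{}\(\)\[\]]')
    assert groups == [['expr'], ['term'], ['op', 'term']]

The caller= keyword added to slog() works the same way as the forwarding in
dump_grammar(): the call site is resolved once via get_caller_pos() and passed down,
so the position column in the log points at dump_grammar()'s caller rather than at
the slog() calls inside Symbol.dump().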