forked from python/cpython
-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathdocs_generator.py
More file actions
1575 lines (1321 loc) · 53.5 KB
/
docs_generator.py
File metadata and controls
1575 lines (1321 loc) · 53.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
from pathlib import Path
import re
import argparse
from dataclasses import dataclass
import enum
import collections
import typing
import tokenize
import token
from functools import cached_property
import pegen.grammar
from pegen.build import build_parser
# Target width for generated snippet lines.
LINE_LENGTH = 80

# TODO: handle indentation
# Matches a reST `.. grammar-snippet:: <rule names>` directive header line.
# The leading dots are escaped: an unescaped `..` matches *any* two
# characters, so e.g. "xy grammar-snippet:: foo" would be misdetected
# as a directive.  (re.DOTALL lets the captured group include the
# trailing newline; callers split() the capture, so that's harmless.)
HEADER_RE = re.compile(r'\.\.\s+grammar-snippet\s*::\s+(.+)', re.DOTALL)

# Hardcoded so it's the same regardless of how this script is invoked
SCRIPT_NAME = 'Tools/peg_generator/docs_generator.py'
# Command-line interface: positional grammar file and docs directory,
# plus optional debug output and diagram generation.
argparser = argparse.ArgumentParser(
    prog="docs_generator.py",
    description="Re-generate the grammar snippets in docs",
)
argparser.add_argument(
    "grammar_filename",
    help="Grammar description",
)
argparser.add_argument(
    "docs_dir",
    help=(
        "Directory with the docs. All .rst files in this "
        "(and subdirs) will be regenerated."
    ),
)
argparser.add_argument(
    "--debug",
    action="store_true",
    help="Include debug information in the generated docs.",
)
argparser.add_argument(
    "--image-dir",
    help=(
        "Directory into which diagrams are written. All .svg files in this "
        "directory will be removed before new ones are generated. "
        "Requires the railroad-diagrams library from PyPI."
    ),
)
# TODO: Document all these rules somewhere in the docs
# Rules that don't have their own `grammar-snippet` directive yet but
# should eventually get one; treated like top-level rules when deciding
# which referenced rules to pull into a snippet.  Currently empty.
FUTURE_TOPLEVEL_RULES = set()

# Rules that should be replaced by another rule, *if* they have the
# same contents.
# (In the grammar, these rules may have different actions, which we ignore
# here.)
REPLACED_SYNONYMS = {
    'literal_expr': 'literal_pattern',
    't_primary': 'primary',  # TODO: add logic to tell that these are the same
    'value_pattern': 'attr',
}
# TODO:
# Do more inlining for the diagrams (we can preserve the names!)
# - OptionalSequence (for import)
# Add tests
# Think about "OptionalSequence with separators":
# Gather:
# s.e+
# Match one or more occurrences of e, separated by s. The generated parse tree
# does not include the separator. This is otherwise identical to (e (s e)*).
#
# Proposal: Optional sequence; but for diagrams only:
# s.{e1, e2, e3}
# Match the given expressions in the given order, separated by separator.
# Each of the expressions is individually optional, but at least one must be given.
# Equivalent to:
# e1 [s e2 [s e3]]
# | e2 [s e3]
# | e3
# (There can be any nonzero number of e_n, not necessarily 3)
#
#
# [[e1 s] e2 s] e3
# | [e1 s] e2 s
# | e1
# TODO:
# Better line wrapping
#
# Line-break with_stmt as:
# with_stmt ::= ['async'] 'with'
# ('(' ','.with_item+ [','] ')' | ','.with_item+)
# ':' block
#
# simplify:
# elif_stmt ::= 'elif' named_expression ':' block [elif_stmt | else_block]
# into:
# elif_stmt ::= ('elif' named_expression ':' block)+ [else_block]
#
# Look at function parameters again
# Check diagram size before inlining sub-diagrams
# Mention the ('.' | '...')+ -> (".")+ simplification in prose
# NEED GRAMMAR CHANGES:
#
# (leave for later)
# Mark an alternative as optimization only, so it doesn't show up in docs.
# For example, in del_t_atom, the rule
# '(' del_target ')'
# is covered by the next rule:
# '(' [del_targets] ')'
#
# (leave for later)
# Give names to the subexpressions here:
# proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" [stride] ]
#
# (leave for later)
# don't simplify:
# try_stmt ::= 'try' ':' block (finally_block | (except_block+ ...
# instead keep 3 separate alternatives, like in the grammar
# Similar for star_targets_tuple_seq
# Ideas for extending the library:
# - Aligning
# - OptionalSequence with separators
def parse_docs(docs_dir):
    """
    Get all the top-level rule names, and the files we're updating.

    "top-level" means a rule that the docs explicitly ask for in
    a `grammar-snippet` directive. Anything that references a top-level
    rule should link to it.
    """
    files_with_grammar = set()
    # Maps the name of a top-level rule to the path of the file it's in
    toplevel_rule_locations = {}
    # List of tuples of top-level rules that appear together
    snippet_rule_names = []
    for path in Path(docs_dir).glob('**/*.rst'):
        with path.open(encoding='utf-8') as file:
            for line in file:
                match = HEADER_RE.fullmatch(line)
                if match is None:
                    continue
                files_with_grammar.add(path)
                names = tuple(match[1].split())
                snippet_rule_names.append(names)
                for name in names:
                    previous = toplevel_rule_locations.get(name)
                    if previous is not None:
                        # Each rule may be documented in only one file.
                        raise ValueError(
                            f'rule {name!r} appears both in '
                            + f'{previous} and in {path}. It '
                            + f'should only be documented in one place.')
                    toplevel_rule_locations[name] = path
    print(f'{toplevel_rule_locations=}')
    return files_with_grammar, snippet_rule_names
def main():
    """Entry point: parse args, then regenerate snippets (and diagrams)."""
    args = argparser.parse_args()
    docs_files, snippet_rule_names = parse_docs(args.docs_dir)
    grammar = Grammar.from_pegen_file(
        args.grammar_filename,
        snippet_rule_names,
        args.debug,
    )
    for docs_file in docs_files:
        update_file(docs_file, grammar)
    if args.image_dir:
        generate_diagrams(grammar, args.image_dir)
def update_file(path, grammar):
    """Regenerate every grammar-snippet block in one .rst file.

    The file is rewritten in place only when the regenerated content
    differs from what's already there.
    """
    with path.open(encoding='utf-8') as file:
        original_lines = []
        new_lines = []
        # While True, we're inside an old (generated) snippet body and
        # its lines are dropped instead of copied to `new_lines`.
        ignoring = False
        for line in file:
            original_lines.append(line)
            if ignoring:
                # The old snippet body consists of indented and blank
                # lines; the first non-indented line ends it.
                is_indented = (not line.strip()
                               or line.startswith((' ', '\t')))
                if not is_indented:
                    ignoring = False
            if not ignoring:
                new_lines.append(line)
            if match := HEADER_RE.fullmatch(line):
                # A `grammar-snippet` directive: skip its old body and
                # emit the freshly generated snippet instead.
                ignoring = True
                first_rule_name = match[1].split(maxsplit=1)[0]
                snippet = grammar.snippets[first_rule_name]
                for line in generate_rule_lines(snippet):
                    if line.strip():
                        # NOTE(review): the indent inside this f-string
                        # must match the reST directive-body indent used
                        # in the docs; the width here may have been
                        # mangled by whitespace-collapsing — confirm
                        # against the original file.
                        new_lines.append(f' {line}\n')
                    else:
                        # Avoid trailing whitespace on blank lines.
                        new_lines.append('\n')
                new_lines.append('\n')
    # Drop trailing blank lines so the rewritten file ends cleanly.
    while new_lines and not new_lines[-1].strip():
        del new_lines[-1]
    if original_lines != new_lines:
        print(f'Updating: {path}')
        with path.open(encoding='utf-8', mode='w') as file:
            file.writelines(new_lines)
    else:
        print(f'Unchanged: {path}')
class Grammar:
    """A representation of the complete grammar"""

    @classmethod
    def from_pegen_file(cls, grammar_filename, snippet_rule_names, debug=False):
        """Build a Grammar from a pegen grammar file.

        snippet_rule_names: list of tuples of rule names, one tuple per
        `grammar-snippet` directive found in the docs.
        """
        pegen_grammar, parser, tokenizer = build_parser(grammar_filename)
        pegen_rules = dict(pegen_grammar.rules)
        rules = {}
        for rule_name, pegen_rule in pegen_rules.items():
            # Convert each pegen rule's right-hand side into our Node tree.
            node = convert_pegen_node(pegen_rule.rhs)
            rules[rule_name] = node
        self = cls(
            rules,
            snippet_rule_names,
            debug=debug,
        )
        if self.debug:
            # Dump every (already simplified) rule for inspection.
            for name, rule in self.rules.items():
                print()
                print(name, rule.format())
                for line in rule.dump_tree():
                    print(line)
        return self

    def __init__(self, rules, snippet_rule_names, debug=False):
        self.rules = rules
        self.debug = debug
        # Rules that get their own snippet somewhere in the docs;
        # these are never inlined into other rules.
        toplevel_rule_names = set()
        for names in snippet_rule_names:
            toplevel_rule_names.update(names)
        self.toplevel_rule_names = frozenset(toplevel_rule_names)
        self._simplify()
        # Maps the first rule name of each directive to its Snippet.
        self.snippets = {}
        for names in snippet_rule_names:
            snippet = Snippet(self, names)
            self.snippets[names[0]] = snippet

    def _simplify(self):
        # First pass: simplify each rule's tree individually.
        for name, node in self.rules.items():
            path = PathEntry.root_path(name)
            self.rules[name] = node.simplify(self.rules, path)
        # Simplify the rules repeatedly,
        # until simplification no longer changes them
        # (a fixed-point iteration).
        old_rules = None
        while self.rules != old_rules:
            old_rules = self.rules
            self._simplify_ruleset()

    def _simplify_ruleset(self):
        """Simplify and inline a bunch of rules"""
        # Generate new ruleset with simplified nodes
        new_ruleset = {}
        for rule_name, node in self.rules.items():
            new_ruleset[rule_name] = node.simplify(
                self.rules, PathEntry.root_path(rule_name),
            )
        # A rule will be inlined if we were not explicitly asked to provide a
        # definition for it (i.e. it is not in `toplevel_rule_names`), and:
        # - is only mentioned once, or
        # - its definition is short
        # - it expands to a single nonterminal
        # Count up all the references to rules we're documenting.
        reference_counts = collections.Counter()
        for node in new_ruleset.values():
            for descendant in node.generate_descendants(filter_class=Nonterminal):
                name = descendant.value
                if name in new_ruleset:
                    reference_counts[name] += 1
        # Inline the rules we found
        for name, count in reference_counts.items():
            node = new_ruleset[name]
            if name not in self.toplevel_rule_names and (
                count == 1
                # "short" = not much longer than the name it replaces.
                or len(node.format()) <= len(name) * 1.2
                or isinstance(node, Nonterminal)
            ):
                replaced_name = name
                replacement = node
                # Substitute the definition everywhere, then drop the rule.
                for rule_name, rule_node in new_ruleset.items():
                    new_node = rule_node.inlined(replaced_name, replacement)
                    new_ruleset[rule_name] = new_node
                del new_ruleset[name]
        self.rules = new_ruleset
class Snippet:
    """One group of rules rendered together in the documentation."""

    def __init__(self, grammar, initial_rule_names):
        self.grammar = grammar
        self.initial_rule_names = initial_rule_names
        documented_names = self._get_documented_rule_names()
        self.documented_rules = {
            name: grammar.rules[name] for name in documented_names
        }
        self.rule_names_to_inline = frozenset(self._get_rule_names_to_inline())

    def _get_documented_rule_names(self):
        """Collect all rule names this snippet should document.

        Starts from `initial_rule_names` and walks referenced rules
        (recursively), skipping rules documented elsewhere (those in the
        grammar's `toplevel_rule_names` or in FUTURE_TOPLEVEL_RULES).
        """
        grammar = self.grammar
        documented_elsewhere = (
            grammar.toplevel_rule_names | FUTURE_TOPLEVEL_RULES
        )
        queue = collections.deque(self.initial_rule_names)
        collected = []  # result list; preserves discovery order
        while queue:
            name = queue.popleft()
            if name in collected:
                continue
            collected.append(name)
            for descendant in grammar.rules[name].generate_descendants(
                    filter_class=Nonterminal):
                referenced = descendant.value
                if (referenced in grammar.rules
                        and referenced not in documented_elsewhere):
                    queue.append(referenced)
        return collected

    def _get_rule_names_to_inline(self):
        """Names of documented rules referenced exactly once: candidates
        for inlining into their single point of use."""
        counts = collections.Counter(
            descendant.value
            for name, node in self.documented_rules.items()
            for descendant in node.generate_descendants(filter_class=Nonterminal)
            if descendant.value in self.documented_rules
            and descendant.value != name
        )
        return frozenset(name for name, count in counts.items() if count == 1)
# TODO: Check parentheses are correct in complex cases.
class Precedence(enum.IntEnum):
    """Binding strength of grammar constructs, used for parenthesization.

    A node is parenthesized when its precedence is lower than that of
    the enclosing context (see Node.needs_parens); the declaration order
    here determines the integer values, so do not reorder members.
    """
    MIN = enum.auto()
    CHOICE = enum.auto()
    SEQUENCE = enum.auto()
    REPEAT = enum.auto()
    LOOKAHEAD = enum.auto()
    ATOM = enum.auto()
    MAX = enum.auto()
@dataclass
class OutputSymbol:
    """A fixed piece of output text (e.g. a parenthesis or bracket)."""
    content: str

    def __str__(self):
        return self.content
class OutputNodeRepr:
    """Single-line string form of a Node, remembering the formatting context.

    Keeping `node`, `parent_precedence` and `parent_node` around lets
    OutputLine.split_once() re-format the node across several lines later.
    """
    content: str
    node: 'Node'
    parent_precedence: 'Precedence'
    parent_node: 'Node | None'

    def __init__(self, node, parent_precedence=Precedence.MIN, parent_node=None):
        self.node = node
        # Formatted (parenthesized if needed) one-line representation.
        self.content = node.format_for_precedence(parent_precedence)
        self.parent_precedence = parent_precedence
        self.parent_node = parent_node

    def __str__(self):
        return self.content
def visible_len(string):
    """Return the length of *string*, not counting markup Sphinx hides."""
    # Backticks are currently the only markup in generated output,
    # so just subtract their count.
    return len(string) - string.count('`')
def split_lines(lines: list['OutputLine']):
    """Yield the given lines, splitting any that report themselves
    splittable (via split_once) until none are left."""
    pending = collections.deque(lines)
    while pending:
        line = pending.popleft()
        replacement = line.split_once()
        if replacement is None:
            # Line fits (or can't be split further): emit it as-is.
            yield line
        else:
            # Re-queue the pieces at the front, in order, so they are
            # processed (and possibly split again) before later lines.
            pending.extendleft(reversed(replacement))
class OutputLine:
    """One (possibly too-long) line of formatted output.

    Holds a list of parts (OutputSymbol / OutputNodeRepr) joined with
    spaces, and knows how to split itself when it exceeds `max_length`.
    """

    def __init__(self, parts, max_length, first_indent='', running_indent=None):
        self.max_length = max_length
        self.parts = parts
        # Indent of this line itself.
        self.first_indent = first_indent
        # Indent for continuation lines produced by splitting;
        # defaults to the line's own indent.
        if running_indent is None:
            self.running_indent = first_indent
        else:
            self.running_indent = running_indent

    @classmethod
    def from_nodes(cls, nodes, max_length, first_indent='', running_indent=None, parent_precedence=Precedence.MIN, parent_node=None):
        """Build a line from Nodes, wrapping each in an OutputNodeRepr."""
        return cls([OutputNodeRepr(node, parent_precedence, parent_node) for node in nodes], max_length, first_indent, running_indent)

    def __str__(self):
        return self.first_indent + self.string_representation

    def __len__(self):
        # Visible length of the content only (markup excluded);
        # note that first_indent is not counted here.
        return visible_len(self.string_representation)

    @cached_property
    def string_representation(self):
        return ' '.join(str(part) for part in self.parts)

    def split_once(self):
        """Split this line if it is too long.

        Return a list of shorter OutputLines to use instead, or None if
        the line already fits (or no part of it can be split).
        """
        if len(self) <= self.max_length:
            return None
        # Candidate split points: node parts only (symbols can't split),
        # tried longest-first.
        contents_by_length = [(i, part) for (i, part) in enumerate(self.parts)
                              if isinstance(part, OutputNodeRepr)]

        def biggest_contents(element):
            i, part = element
            return -len(part.content)

        contents_by_length.sort(key=biggest_contents)
        for i, part in contents_by_length:
            results = []
            # Parts before the split point stay together (this shorter
            # line may be split again by a later split_once pass).
            results.append(OutputLine(self.parts[:i], self.max_length, self.first_indent, self.running_indent))
            if visible_len(part.content) <= self.max_length:
                # The part fits on a continuation line of its own.
                results.append(OutputLine([part], self.max_length, self.running_indent))
            else:
                # Too big even alone: ask the node to break itself into
                # opening symbols, inner lines, and closing symbols.
                split_part = part.node.split_into_lines(
                    self.max_length,
                    self.running_indent,
                    parent_precedence=part.parent_precedence,
                    parent_node=part.parent_node,
                )
                if not split_part:
                    # This node can't be split; try the next-biggest one.
                    continue
                opening, inner_lines, closing = split_part
                results.append(OutputLine(opening, self.max_length, self.running_indent))
                results.extend(inner_lines)
                results.append(OutputLine(closing, self.max_length, self.running_indent))
            results.append(OutputLine(self.parts[i+1:], self.max_length, self.running_indent))
            return results
class Node:
    """Abstract base class for nodes of the grammar tree.

    Most methods here raise NotImplementedError and are overridden by
    the concrete subclasses; the rest provide shared behavior.
    """

    def format(self) -> str:
        """Return self's representation, as a single-line string.
        """
        raise NotImplementedError()

    def __iter__(self):
        """Yield all child nodes."""
        raise NotImplementedError()

    def inlined(self, replaced_name, replacement):
        """Return a version of self with the given nonterminal replaced.

        For example, given:

        >>> str(node)
        number "+" number
        >>> str(replacement)
        digit*

        we should get:

        >>> inlined(node, "number", replacement)
        digit* "+" digit*
        """
        raise NotImplementedError()

    def get_possible_start_tokens(self, rules, rules_considered):
        """Return the set of tokens that strings that match self could start with.

        Additionally, if `None` is in the set, the node could match an empty
        string.

        The result is a Python set, which should be converted to TokenSet
        before use. See TokenSet for details.
        """
        raise NotImplementedError()

    def get_follow_set_for_path(self, path: 'PathEntry', rules: dict):
        """Return the set of tokens that can follow the child node
        identified by `path`.

        The result depends on parent nodes, which are given as `path`.

        The result is a Python set, which should be converted to TokenSet
        before use. See TokenSet for details.

        Unlike get_possible_start_tokens(), the result should not contain None.
        """
        raise NotImplementedError()

    def get_rule_follow_set(self, rule_name, rules):
        """Return a set of all tokens that might follow the given rule,
        if the rule is matched by a sub-node of self (or self itself).

        Additionally, if None is in the returned set, all tokens that could
        immediately follow `self` should be added to the result.
        """
        raise NotImplementedError()

    def simplify(self, rules, path):
        """Simplify self repeatedly until simplification no longer changes it.

        This should not be overridden.
        """
        node = self
        last_node = None
        while node != last_node:
            last_node = node
            node = node.simplify_once(rules, path)
            # nifty debug output:
            # debug('simplified', last_node.format(), '->', node.format())
            # debug('from:')
            # for line in last_node.dump_tree():
            #     debug(line)
            # debug('to:')
            # for line in node.dump_tree():
            #     debug(line)
        return node

    def simplify_once(self, rules, path):
        """Return a simplified version of self."""
        return self

    def generate_descendants(self, filter_class=None):
        """Yield descendants of the given class, recursively.

        If filter_class is None, yield all descendants.
        Note: self is included when it matches filter_class.
        """
        if filter_class is None or isinstance(self, filter_class):
            yield self
        for value in self:
            yield from value.generate_descendants(filter_class)

    def needs_parens(self, parent_precedence):
        # A node binding less tightly than its context needs parentheses.
        return self.precedence < parent_precedence

    def format_for_precedence(self, parent_precedence):
        """Like format(), but add parentheses if necessary.

        parent_precedence is the Precedence of the enclosing Node.
        """
        result = self.format()
        if self.needs_parens(parent_precedence):
            result = '(' + result + ')'
        return result

    def dump_tree(self, indent=0):
        """Yield lines of a debug representation of this node."""
        yield ' : ' + ' ' * indent + self.format()

    def format_lines(self, columns):
        """Yield lines of a string representation, as it should appear in docs.

        Concatenating the lines should be equivalent to calling format(),
        ignoring whitespace.
        """
        yield self.format()

    def split_into_lines(self, max_length, indent, parent_precedence, parent_node):
        # Default: this node cannot be split across lines.
        return None
@dataclass(frozen=True)
class Container(Node):
    """Collection of zero or more child items"""
    items: list[Node]

    def __iter__(self):
        yield from self.items

    def simplify_item(self, rules, item, path):
        # Hook for subclasses; by default just simplify the child.
        return item.simplify(rules, path)

    def dump_tree(self, indent=0):
        yield ' : ' + ' ' * indent + type(self).__name__ + ':'
        for item in self:
            yield from item.dump_tree(indent + 1)

    def inlined(self, replaced_name, replacement):
        # Rebuild self (preserving the concrete subclass) with every
        # child inlined.
        self_type = type(self)
        return self_type(
            [item.inlined(replaced_name, replacement) for item in self.items]
        )
@dataclass(frozen=True)
class Choice(Container):
    """Grammar node that matches any of a sequence of alternatives."""
    precedence = Precedence.CHOICE

    def format(self):
        if not self.items:
            # A choice with no alternatives can never match.
            return '<UNREACHABLE>'
        return " | ".join(
            item.format_for_precedence(Precedence.CHOICE)
            for item in self
        )

    def simplify_once(self, rules, path):
        self_type = type(self)
        # Each alternative is kept as a *list* of nodes (the items of
        # its Sequence), so subsequence rewriting can look inside.
        alternatives = []
        is_optional = False
        for i, item in enumerate(self):
            item = self.simplify_item(rules, item, path.child(self, i))
            match item:
                case None:
                    pass
                case self.EMPTY:
                    # An empty alternative makes the whole choice optional.
                    is_optional = True
                    # ignore the item
                case self.UNREACHABLE:
                    # ignore the item
                    pass
                case Optional(x):
                    # `[x]` is the same as `(x | <empty>)`.
                    is_optional = True
                    alternatives.append([x])
                case Sequence(elements):
                    alternatives.append(elements)
                case Choice(sub_alts):
                    # Flatten nested choices.
                    alternatives.extend([a] for a in sub_alts)
                case _:
                    alternatives.append([item])
        assert all(isinstance(item, list) for item in alternatives)
        # Simplify subsequences: call simplify_subsequence on all
        # "tails" of `alternatives`.
        new_alts = []
        index = 0
        while index < len(alternatives):
            replacement, num_processed = self.simplify_subsequence(rules, alternatives[index:])
            # replacement should be list[list[Node]]
            assert isinstance(replacement, list)
            assert all(isinstance(alt, list) for alt in replacement)
            assert all(isinstance(node, Node) for alt in replacement for node in alt)
            # Ensure we make progress
            assert num_processed > 0
            new_alts.extend(replacement)
            index += num_processed
        alternatives = new_alts

        def wrap(node):
            # Re-apply the optionality stripped from empty/Optional
            # alternatives above.
            if is_optional:
                return Optional(node)
            else:
                return node

        if len(alternatives) == 1:
            return wrap(Sequence(alternatives[0]))
        return wrap(self_type([
            Sequence(alt).simplify(rules, path.child(self, None))
            for alt in alternatives
        ]))

    def simplify_subsequence(self, rules, subsequence):
        """Simplify the given subsequence of self's alternatives.

        Returns (replacement alternatives, number of alternatives consumed).
        """
        if len(subsequence) >= 2:
            # If two or more adjacent alternatives start or end with the
            # same item, we pull that item out, and replace the alternatives
            # with a sequence of
            # [common item, Choice of the remainders of the alts].
            first_alt = subsequence[0]
            # We do this for both the start and the end; for that we need the
            # index of the candidate item (0 or -1) and the slice to get the
            # rest of the items.
            for index, rest_slice in (
                (0, slice(1, None)),
                (-1, slice(None, -1)),
            ):
                num_alts_with_common_item = 1
                for alt in subsequence[1:]:
                    if alt[index] != first_alt[index]:
                        break
                    num_alts_with_common_item += 1
                if num_alts_with_common_item > 1:
                    common_item = first_alt[index]
                    remaining_choice = Choice([
                        Sequence(alt[rest_slice])
                        for alt in subsequence[:num_alts_with_common_item]
                    ])
                    if index == 0:
                        result = [
                            [common_item, remaining_choice],
                        ]
                    else:
                        result = [
                            [remaining_choice, common_item],
                        ]
                    return result, num_alts_with_common_item
        # An alternative that is just `x` next to `s.x+ [...]` appears
        # redundant (a one-element gather also matches a lone `x`), so
        # keep only the Gather alternative. TODO confirm: relies on the
        # Gather element comparing equal to x.
        match subsequence[:2]:
            case [
                [x],
                [Gather(_, x1), Optional()] as result,
            ] if x == x1:
                return [result], 2
        return [subsequence[0]], 1

    def get_possible_start_tokens(self, rules, rules_considered):
        # Union of the start tokens of all alternatives.
        result = set()
        for item in self.items:
            result.update(item.get_possible_start_tokens(rules, rules_considered))
        return result

    def get_follow_set_for_path(self, path, rules):
        # Whatever can follow the whole choice can follow any alternative.
        return path.parent_entry.get_follow_set(rules)

    def get_rule_follow_set(self, rule_name, rules):
        result = set()
        for alt in self.items:
            result.update(alt.get_rule_follow_set(rule_name, rules))
        return result

    def split_into_lines(self, max_length, indent, parent_precedence, parent_node):
        # There are 3 different styles of multi-line Choice
        # NOTE(review): the exact widths of the indent strings below may
        # have been mangled by whitespace-collapsing — confirm against
        # the original file.
        if self.needs_parens(parent_precedence):
            # Wrapped in parentheses. The pipe is between elements,
            # and indented 1 extra space.
            parens = [OutputSymbol('(')], [OutputSymbol(')')]
            first_indents = ' ', ' | '
            running_indent = ' '
        elif parent_node:
            # Not wrapped in parentheses. The pipe is between elements.
            parens = [], []
            first_indents = ' ', '| '
            running_indent = ' '
        else:
            # A top-level choice. The pipe is in front of each element.
            parens = [], []
            first_indents = '| ', '| '
            running_indent = ' '
        return (
            parens[0],
            [
                OutputLine.from_nodes(
                    [alt],
                    max_length=max_length - len(running_indent),
                    first_indent=indent + (
                        first_indents[0] if i == 0
                        else first_indents[1]
                    ),
                    running_indent=indent + running_indent,
                    parent_node=self,
                )
                for i, alt in enumerate(self)
            ],
            parens[1],
        )
@dataclass(frozen=True)
class Sequence(Container):
    """Grammar node that matches its items one after another, in order."""
    precedence = Precedence.SEQUENCE

    def simplify_once(self, rules, path):
        self_type = type(self)
        items = []
        for i, item in enumerate(self):
            item = self.simplify_item(rules, item, path.child(self, i))
            match item:
                case self.EMPTY:
                    # Matching the empty string adds nothing; drop it.
                    pass
                case self.UNREACHABLE:
                    # One unmatchable item makes the whole sequence
                    # unmatchable.
                    return UNREACHABLE
                case Sequence(subitems):
                    # Flatten nested sequences.
                    items.extend(subitems)
                case _:
                    items.append(item)
        if not items:
            return EMPTY
        # Simplify subsequences: call simplify_subsequence on all
        # "tails" of `items`.
        new_items = []
        index = 0
        while index < len(items):
            replacement, num_processed = self.simplify_subsequence(rules, items[index:])
            # Single nodes are iterable; they act as a sequence of their
            # children, and we don't want that in this case.
            assert not isinstance(replacement, Node)
            # Ensure we make progress
            assert num_processed > 0
            new_items.extend(replacement)
            index += num_processed
        items = new_items
        if len(items) == 1:
            return items[0]
        return self_type(items)

    def simplify_subsequence(self, rules, subsequence):
        """Simplify the start of the given (sub)sequence.

        Return the simplified result and the number of items that were
        simplified.
        """
        match subsequence[:2]:
            # `e (s e)*` is a Gather: one or more `e` separated by `s`.
            case [e1, ZeroOrMore(Sequence([s, e2]))] if e1 == e2:
                return [Gather(s, e2)], 2
        return [subsequence[0]], 1

    def format(self):
        return " ".join(
            item.format_for_precedence(Precedence.SEQUENCE)
            for item in self
        )

    def get_possible_start_tokens(self, rules, rules_considered):
        if not self.items:
            # An empty sequence matches the empty string (signalled by None).
            return {None}
        result = set()
        for item in self.items:
            item_start_tokens = item.get_possible_start_tokens(rules, rules_considered)
            result.update(item_start_tokens)
            item_can_be_empty = (None in item_start_tokens)
            if item_can_be_empty:
                # This item may match "", so the next item's start
                # tokens are possible start tokens too; keep going.
                continue
            else:
                result.discard(None)
                break
        return result

    def get_follow_set_for_path(self, path, rules):
        assert path.node == self
        # Only the items *after* the child identified by `path` matter.
        items = self.items[path.position+1:]
        result = {None}
        for item in items:
            item_start_tokens = item.get_possible_start_tokens(rules, set())
            result.update(item_start_tokens)
            item_can_be_empty = (None in item_start_tokens)
            if item_can_be_empty:
                continue
            else:
                result.discard(None)
                break
        if None in result:
            # All remaining items could match "": whatever follows the
            # whole sequence can follow the child as well.
            result.discard(None)
            result.update(path.parent_entry.get_follow_set(rules))
        return result

    def get_rule_follow_set(self, rule_name, rules):
        result = set()
        for item in self.items:
            if None in result:
                # None in the result means that the start tokens
                # of the current item should be added to the result.
                tokens_for_item = item.get_possible_start_tokens(rules, set())
                # If None is in "tokens_for_item", it means the item
                # could match an empty string, and so the *next*
                # item's start tokens should be added to the result
                # as well.
                result.discard(None)
                result.update(tokens_for_item)
            tokens_for_item = item.get_rule_follow_set(rule_name, rules)
            result.update(tokens_for_item)
        # If None remained in the result, we'll need to add whatever
        # follows this sequence. Conveniently, that's signalled by
        # having None in the result.
        return result

    def split_into_lines(self, max_length, indent, parent_precedence, parent_node):
        if self.needs_parens(parent_precedence):
            # TODO: This apparently never happens; remove?
            return (
                [OutputSymbol('(')],
                [
                    OutputLine.from_nodes(
                        self,
                        max_length=max_length - 2,
                        first_indent=indent + ' ',
                        parent_node=self,
                    )
                ],
                [OutputSymbol(')')],
            )
        else:
            # No parentheses needed: one inner line, no opening/closing
            # symbols.
            return (
                [],
                [
                    OutputLine.from_nodes(
                        self,
                        max_length=max_length,
                        first_indent=indent,
                        parent_precedence=self.precedence,
                        parent_node=self,
                    )
                ],
                [],
            )
@dataclass(frozen=True)
class Decorator(Node):
    """Node with exactly one child"""
    item: Node

    def __iter__(self):
        yield self.item

    def dump_tree(self, indent=0):
        yield ' : ' + ' ' * indent + type(self).__name__ + ':'
        yield from self.item.dump_tree(indent + 1)

    def simplify_once(self, rules, path):
        self_type = type(self)
        item = self.item.simplify(rules, path.child(self))
        match item:
            case Sequence([x]):
                # Unwrap a single-item sequence before re-wrapping.
                item = x
            case self.EMPTY:
                # Decorating the empty string still matches the empty
                # string.
                return EMPTY
        return self_type(item)

    def inlined(self, replaced_name, replacement):
        self_type = type(self)
        return self_type(self.item.inlined(replaced_name, replacement))

    def split_into_lines(self, max_length, indent, parent_precedence, parent_node):
        # Put the decorator's brackets (`self.parens`, defined by each
        # subclass) on their own lines, with the child indented between.
        return (
            [OutputSymbol(self.parens[0])],
            [
                OutputLine.from_nodes(
                    [self.item],
                    max_length=max_length-2,
                    first_indent=indent + ' ',
                    parent_node=self,
                )
            ],
            [OutputSymbol(self.parens[1])],
        )
@dataclass(frozen=True)
class Optional(Decorator):
    """Matches its item, or the empty string: formatted as `[item]`."""
    precedence = Precedence.ATOM
    parens = '[', ']'

    def format(self):
        return '[' + self.item.format() + ']'

    def simplify_once(self, rules, path):
        match self.item:
            # [x [y] | y] -> [x] [y]
            case Choice([Sequence([x, Optional(y1)]), y2]) if y1 == y2:
                return Sequence([Optional(x), Optional(y1)])
            case OneOrMore(x):
                # [x+] -> x*
                return ZeroOrMore(x)
            case Optional(x):
                # [[x]] -> [x]
                return self.item
            case self.UNREACHABLE:
                # An optional that can never match still matches "".
                return EMPTY
        return super().simplify_once(rules, path)