helpers.py 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088
  1. # helpers.py
  2. import html.entities
  3. import re
  4. import typing
  5. from . import __diag__
  6. from .core import *
  7. from .util import _bslash, _flatten, _escape_regex_range_chars
  8. #
  9. # global helpers
  10. #
  11. def delimited_list(
  12. expr: Union[str, ParserElement],
  13. delim: Union[str, ParserElement] = ",",
  14. combine: bool = False,
  15. min: typing.Optional[int] = None,
  16. max: typing.Optional[int] = None,
  17. *,
  18. allow_trailing_delim: bool = False,
  19. ) -> ParserElement:
  20. """Helper to define a delimited list of expressions - the delimiter
  21. defaults to ','. By default, the list elements and delimiters can
  22. have intervening whitespace, and comments, but this can be
  23. overridden by passing ``combine=True`` in the constructor. If
  24. ``combine`` is set to ``True``, the matching tokens are
  25. returned as a single token string, with the delimiters included;
  26. otherwise, the matching tokens are returned as a list of tokens,
  27. with the delimiters suppressed.
  28. If ``allow_trailing_delim`` is set to True, then the list may end with
  29. a delimiter.
  30. Example::
  31. delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc']
  32. delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
  33. """
  34. if isinstance(expr, str_type):
  35. expr = ParserElement._literalStringClass(expr)
  36. dlName = "{expr} [{delim} {expr}]...{end}".format(
  37. expr=str(expr.copy().streamline()),
  38. delim=str(delim),
  39. end=" [{}]".format(str(delim)) if allow_trailing_delim else "",
  40. )
  41. if not combine:
  42. delim = Suppress(delim)
  43. if min is not None:
  44. if min < 1:
  45. raise ValueError("min must be greater than 0")
  46. min -= 1
  47. if max is not None:
  48. if min is not None and max <= min:
  49. raise ValueError("max must be greater than, or equal to min")
  50. max -= 1
  51. delimited_list_expr = expr + (delim + expr)[min, max]
  52. if allow_trailing_delim:
  53. delimited_list_expr += Opt(delim)
  54. if combine:
  55. return Combine(delimited_list_expr).set_name(dlName)
  56. else:
  57. return delimited_list_expr.set_name(dlName)
  58. def counted_array(
  59. expr: ParserElement,
  60. int_expr: typing.Optional[ParserElement] = None,
  61. *,
  62. intExpr: typing.Optional[ParserElement] = None,
  63. ) -> ParserElement:
  64. """Helper to define a counted list of expressions.
  65. This helper defines a pattern of the form::
  66. integer expr expr expr...
  67. where the leading integer tells how many expr expressions follow.
  68. The matched tokens returns the array of expr tokens as a list - the
  69. leading count token is suppressed.
  70. If ``int_expr`` is specified, it should be a pyparsing expression
  71. that produces an integer value.
  72. Example::
  73. counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd']
  74. # in this parser, the leading integer value is given in binary,
  75. # '10' indicating that 2 values are in the array
  76. binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2))
  77. counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd']
  78. # if other fields must be parsed after the count but before the
  79. # list items, give the fields results names and they will
  80. # be preserved in the returned ParseResults:
  81. count_with_metadata = integer + Word(alphas)("type")
  82. typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items")
  83. result = typed_array.parse_string("3 bool True True False")
  84. print(result.dump())
  85. # prints
  86. # ['True', 'True', 'False']
  87. # - items: ['True', 'True', 'False']
  88. # - type: 'bool'
  89. """
  90. intExpr = intExpr or int_expr
  91. array_expr = Forward()
  92. def count_field_parse_action(s, l, t):
  93. nonlocal array_expr
  94. n = t[0]
  95. array_expr <<= (expr * n) if n else Empty()
  96. # clear list contents, but keep any named results
  97. del t[:]
  98. if intExpr is None:
  99. intExpr = Word(nums).set_parse_action(lambda t: int(t[0]))
  100. else:
  101. intExpr = intExpr.copy()
  102. intExpr.set_name("arrayLen")
  103. intExpr.add_parse_action(count_field_parse_action, call_during_try=True)
  104. return (intExpr + array_expr).set_name("(len) " + str(expr) + "...")
  105. def match_previous_literal(expr: ParserElement) -> ParserElement:
  106. """Helper to define an expression that is indirectly defined from
  107. the tokens matched in a previous expression, that is, it looks for
  108. a 'repeat' of a previous expression. For example::
  109. first = Word(nums)
  110. second = match_previous_literal(first)
  111. match_expr = first + ":" + second
  112. will match ``"1:1"``, but not ``"1:2"``. Because this
  113. matches a previous literal, will also match the leading
  114. ``"1:1"`` in ``"1:10"``. If this is not desired, use
  115. :class:`match_previous_expr`. Do *not* use with packrat parsing
  116. enabled.
  117. """
  118. rep = Forward()
  119. def copy_token_to_repeater(s, l, t):
  120. if t:
  121. if len(t) == 1:
  122. rep << t[0]
  123. else:
  124. # flatten t tokens
  125. tflat = _flatten(t.as_list())
  126. rep << And(Literal(tt) for tt in tflat)
  127. else:
  128. rep << Empty()
  129. expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
  130. rep.set_name("(prev) " + str(expr))
  131. return rep
  132. def match_previous_expr(expr: ParserElement) -> ParserElement:
  133. """Helper to define an expression that is indirectly defined from
  134. the tokens matched in a previous expression, that is, it looks for
  135. a 'repeat' of a previous expression. For example::
  136. first = Word(nums)
  137. second = match_previous_expr(first)
  138. match_expr = first + ":" + second
  139. will match ``"1:1"``, but not ``"1:2"``. Because this
  140. matches by expressions, will *not* match the leading ``"1:1"``
  141. in ``"1:10"``; the expressions are evaluated first, and then
  142. compared, so ``"1"`` is compared with ``"10"``. Do *not* use
  143. with packrat parsing enabled.
  144. """
  145. rep = Forward()
  146. e2 = expr.copy()
  147. rep <<= e2
  148. def copy_token_to_repeater(s, l, t):
  149. matchTokens = _flatten(t.as_list())
  150. def must_match_these_tokens(s, l, t):
  151. theseTokens = _flatten(t.as_list())
  152. if theseTokens != matchTokens:
  153. raise ParseException(
  154. s, l, "Expected {}, found{}".format(matchTokens, theseTokens)
  155. )
  156. rep.set_parse_action(must_match_these_tokens, callDuringTry=True)
  157. expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
  158. rep.set_name("(prev) " + str(expr))
  159. return rep
def one_of(
    strs: Union[typing.Iterable[str], str],
    caseless: bool = False,
    use_regex: bool = True,
    as_keyword: bool = False,
    *,
    useRegex: bool = True,
    asKeyword: bool = False,
) -> ParserElement:
    """Helper to quickly define a set of alternative :class:`Literal` s,
    and makes sure to do longest-first testing when there is a conflict,
    regardless of the input order, but returns
    a :class:`MatchFirst` for best performance.

    Parameters:

    - ``strs`` - a string of space-delimited literals, or a collection of
      string literals
    - ``caseless`` - treat all literals as caseless - (default= ``False``)
    - ``use_regex`` - as an optimization, will
      generate a :class:`Regex` object; otherwise, will generate
      a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if
      creating a :class:`Regex` raises an exception) - (default= ``True``)
    - ``as_keyword`` - enforce :class:`Keyword`-style matching on the
      generated expressions - (default= ``False``)
    - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility,
      but will be removed in a future release

    Example::

        comp_oper = one_of("< = > <= >= !=")
        var = Word(alphas)
        number = Word(nums)
        term = var | number
        comparison_expr = term + comp_oper + term
        print(comparison_expr.search_string("B = 12  AA=23 B<=AA AA>12"))

    prints::

        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
    """
    # reconcile pre-PEP8 synonym arguments with their snake_case forms
    asKeyword = asKeyword or as_keyword
    useRegex = useRegex and use_regex

    # a str passed for ``caseless`` usually means the caller passed two
    # string choice arguments by mistake
    if (
        isinstance(caseless, str_type)
        and __diag__.warn_on_multiple_string_args_to_oneof
    ):
        warnings.warn(
            "More than one string argument passed to one_of, pass"
            " choices as a list or space-delimited string",
            stacklevel=2,
        )

    # choose comparison/prefix helpers and the element class to match
    # the requested caseless/keyword behavior
    if caseless:
        isequal = lambda a, b: a.upper() == b.upper()
        masks = lambda a, b: b.upper().startswith(a.upper())
        parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
    else:
        isequal = lambda a, b: a == b
        masks = lambda a, b: b.startswith(a)
        parseElementClass = Keyword if asKeyword else Literal

    symbols: List[str] = []
    if isinstance(strs, str_type):
        symbols = strs.split()
    elif isinstance(strs, Iterable):
        symbols = list(strs)
    else:
        raise TypeError("Invalid argument to one_of, expected string or iterable")
    if not symbols:
        return NoMatch()

    # reorder given symbols to take care to avoid masking longer choices with shorter ones
    # (but only if the given symbols are not just single characters)
    if any(len(sym) > 1 for sym in symbols):
        i = 0
        while i < len(symbols) - 1:
            cur = symbols[i]
            for j, other in enumerate(symbols[i + 1 :]):
                if isequal(other, cur):
                    # duplicate of an earlier symbol - drop it
                    del symbols[i + j + 1]
                    break
                elif masks(cur, other):
                    # 'other' is a longer symbol that starts with 'cur';
                    # move it ahead of 'cur' so it is tried first
                    del symbols[i + j + 1]
                    symbols.insert(i, other)
                    break
            else:
                # no change made at position i - advance
                i += 1

    if useRegex:
        re_flags: int = re.IGNORECASE if caseless else 0
        try:
            if all(len(sym) == 1 for sym in symbols):
                # symbols are just single characters, create range regex pattern
                patt = "[{}]".format(
                    "".join(_escape_regex_range_chars(sym) for sym in symbols)
                )
            else:
                patt = "|".join(re.escape(sym) for sym in symbols)

            # wrap with \b word break markers if defining as keywords
            if asKeyword:
                patt = r"\b(?:{})\b".format(patt)

            ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols))

            if caseless:
                # add parse action to return symbols as specified, not in random
                # casing as found in input string
                symbol_map = {sym.lower(): sym for sym in symbols}
                ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()])

            return ret

        except re.error:
            warnings.warn(
                "Exception creating Regex for one_of, building MatchFirst", stacklevel=2
            )

    # last resort, just use MatchFirst
    return MatchFirst(parseElementClass(sym) for sym in symbols).set_name(
        " | ".join(symbols)
    )
  267. def dict_of(key: ParserElement, value: ParserElement) -> ParserElement:
  268. """Helper to easily and clearly define a dictionary by specifying
  269. the respective patterns for the key and value. Takes care of
  270. defining the :class:`Dict`, :class:`ZeroOrMore`, and
  271. :class:`Group` tokens in the proper order. The key pattern
  272. can include delimiting markers or punctuation, as long as they are
  273. suppressed, thereby leaving the significant key text. The value
  274. pattern can include named results, so that the :class:`Dict` results
  275. can include named token fields.
  276. Example::
  277. text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
  278. attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
  279. print(attr_expr[1, ...].parse_string(text).dump())
  280. attr_label = label
  281. attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)
  282. # similar to Dict, but simpler call format
  283. result = dict_of(attr_label, attr_value).parse_string(text)
  284. print(result.dump())
  285. print(result['shape'])
  286. print(result.shape) # object attribute access works too
  287. print(result.as_dict())
  288. prints::
  289. [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
  290. - color: 'light blue'
  291. - posn: 'upper left'
  292. - shape: 'SQUARE'
  293. - texture: 'burlap'
  294. SQUARE
  295. SQUARE
  296. {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
  297. """
  298. return Dict(OneOrMore(Group(key + value)))
  299. def original_text_for(
  300. expr: ParserElement, as_string: bool = True, *, asString: bool = True
  301. ) -> ParserElement:
  302. """Helper to return the original, untokenized text for a given
  303. expression. Useful to restore the parsed fields of an HTML start
  304. tag into the raw tag text itself, or to revert separate tokens with
  305. intervening whitespace back to the original matching input text. By
  306. default, returns astring containing the original parsed text.
  307. If the optional ``as_string`` argument is passed as
  308. ``False``, then the return value is
  309. a :class:`ParseResults` containing any results names that
  310. were originally matched, and a single token containing the original
  311. matched text from the input string. So if the expression passed to
  312. :class:`original_text_for` contains expressions with defined
  313. results names, you must set ``as_string`` to ``False`` if you
  314. want to preserve those results name values.
  315. The ``asString`` pre-PEP8 argument is retained for compatibility,
  316. but will be removed in a future release.
  317. Example::
  318. src = "this is test <b> bold <i>text</i> </b> normal text "
  319. for tag in ("b", "i"):
  320. opener, closer = make_html_tags(tag)
  321. patt = original_text_for(opener + SkipTo(closer) + closer)
  322. print(patt.search_string(src)[0])
  323. prints::
  324. ['<b> bold <i>text</i> </b>']
  325. ['<i>text</i>']
  326. """
  327. asString = asString and as_string
  328. locMarker = Empty().set_parse_action(lambda s, loc, t: loc)
  329. endlocMarker = locMarker.copy()
  330. endlocMarker.callPreparse = False
  331. matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
  332. if asString:
  333. extractText = lambda s, l, t: s[t._original_start : t._original_end]
  334. else:
  335. def extractText(s, l, t):
  336. t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]
  337. matchExpr.set_parse_action(extractText)
  338. matchExpr.ignoreExprs = expr.ignoreExprs
  339. matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection)
  340. return matchExpr
  341. def ungroup(expr: ParserElement) -> ParserElement:
  342. """Helper to undo pyparsing's default grouping of And expressions,
  343. even if all but one are non-empty.
  344. """
  345. return TokenConverter(expr).add_parse_action(lambda t: t[0])
  346. def locatedExpr(expr: ParserElement) -> ParserElement:
  347. """
  348. (DEPRECATED - future code should use the Located class)
  349. Helper to decorate a returned token with its starting and ending
  350. locations in the input string.
  351. This helper adds the following results names:
  352. - ``locn_start`` - location where matched expression begins
  353. - ``locn_end`` - location where matched expression ends
  354. - ``value`` - the actual parsed results
  355. Be careful if the input text contains ``<TAB>`` characters, you
  356. may want to call :class:`ParserElement.parseWithTabs`
  357. Example::
  358. wd = Word(alphas)
  359. for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
  360. print(match)
  361. prints::
  362. [[0, 'ljsdf', 5]]
  363. [[8, 'lksdjjf', 15]]
  364. [[18, 'lkkjj', 23]]
  365. """
  366. locator = Empty().set_parse_action(lambda ss, ll, tt: ll)
  367. return Group(
  368. locator("locn_start")
  369. + expr("value")
  370. + locator.copy().leaveWhitespace()("locn_end")
  371. )
def nested_expr(
    opener: Union[str, ParserElement] = "(",
    closer: Union[str, ParserElement] = ")",
    content: typing.Optional[ParserElement] = None,
    ignore_expr: ParserElement = quoted_string(),
    *,
    ignoreExpr: ParserElement = quoted_string(),
) -> ParserElement:
    """Helper method for defining nested lists enclosed in opening and
    closing delimiters (``"("`` and ``")"`` are the default).

    Parameters:

    - ``opener`` - opening character for a nested list
      (default= ``"("``); can also be a pyparsing expression
    - ``closer`` - closing character for a nested list
      (default= ``")"``); can also be a pyparsing expression
    - ``content`` - expression for items within the nested lists
      (default= ``None``)
    - ``ignore_expr`` - expression for ignoring opening and closing delimiters
      (default= :class:`quoted_string`)
    - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility
      but will be removed in a future release

    If an expression is not provided for the content argument, the
    nested expression will capture all whitespace-delimited content
    between delimiters as a list of separate values.

    Use the ``ignore_expr`` argument to define expressions that may
    contain opening or closing characters that should not be treated as
    opening or closing characters for nesting, such as quoted_string or
    a comment expression. Specify multiple expressions using an
    :class:`Or` or :class:`MatchFirst`. The default is
    :class:`quoted_string`, but if no expressions are to be ignored, then
    pass ``None`` for this argument.

    Example::

        data_type = one_of("void int short long char float double")
        decl_data_type = Combine(data_type + Opt(Word('*')))
        ident = Word(alphas+'_', alphanums+'_')
        number = pyparsing_common.number
        arg = Group(decl_data_type + ident)
        LPAR, RPAR = map(Suppress, "()")

        code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment))

        c_function = (decl_data_type("type")
                      + ident("name")
                      + LPAR + Opt(delimited_list(arg), [])("args") + RPAR
                      + code_body("body"))
        c_function.ignore(c_style_comment)

        source_code = '''
            int is_odd(int x) {
                return (x%2);
            }

            int dec_to_hex(char hchar) {
                if (hchar >= '0' && hchar <= '9') {
                    return (ord(hchar)-ord('0'));
                } else {
                    return (10+ord(hchar)-ord('A'));
                }
            }
        '''
        for func in c_function.search_string(source_code):
            print("%(name)s (%(type)s) args: %(args)s" % func)

    prints::

        is_odd (int) args: [['int', 'x']]
        dec_to_hex (int) args: [['char', 'hchar']]
    """
    # reconcile the two synonym arguments: if they disagree, prefer
    # ignore_expr only when ignoreExpr still holds its default value
    # (compared structurally against a fresh quoted_string instance)
    if ignoreExpr != ignore_expr:
        ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # build a default content expression: runs of characters that are
        # neither whitespace nor delimiter text, skipping ignored expressions
        if isinstance(opener, str_type) and isinstance(closer, str_type):
            if len(opener) == 1 and len(closer) == 1:
                # single-character delimiters: exclude them (and whitespace)
                # character-by-character
                if ignoreExpr is not None:
                    content = Combine(
                        OneOrMore(
                            ~ignoreExpr
                            + CharsNotIn(
                                opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
                                exact=1,
                            )
                        )
                    ).set_parse_action(lambda t: t[0].strip())
                else:
                    content = empty.copy() + CharsNotIn(
                        opener + closer + ParserElement.DEFAULT_WHITE_CHARS
                    ).set_parse_action(lambda t: t[0].strip())
            else:
                # multi-character delimiters: use negative lookahead on the
                # full delimiter strings
                if ignoreExpr is not None:
                    content = Combine(
                        OneOrMore(
                            ~ignoreExpr
                            + ~Literal(opener)
                            + ~Literal(closer)
                            + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
                        )
                    ).set_parse_action(lambda t: t[0].strip())
                else:
                    content = Combine(
                        OneOrMore(
                            ~Literal(opener)
                            + ~Literal(closer)
                            + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
                        )
                    ).set_parse_action(lambda t: t[0].strip())
        else:
            raise ValueError(
                "opening and closing arguments must be strings if no content expression is given"
            )
    # recursive grammar: a group is opener + (ignored | nested group | content)* + closer
    ret = Forward()
    if ignoreExpr is not None:
        ret <<= Group(
            Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
        )
    else:
        ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
    ret.set_name("nested %s%s expression" % (opener, closer))
    return ret
  486. def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
  487. """Internal helper to construct opening and closing tag expressions, given a tag name"""
  488. if isinstance(tagStr, str_type):
  489. resname = tagStr
  490. tagStr = Keyword(tagStr, caseless=not xml)
  491. else:
  492. resname = tagStr.name
  493. tagAttrName = Word(alphas, alphanums + "_-:")
  494. if xml:
  495. tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)
  496. openTag = (
  497. suppress_LT
  498. + tagStr("tag")
  499. + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
  500. + Opt("/", default=[False])("empty").set_parse_action(
  501. lambda s, l, t: t[0] == "/"
  502. )
  503. + suppress_GT
  504. )
  505. else:
  506. tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(
  507. printables, exclude_chars=">"
  508. )
  509. openTag = (
  510. suppress_LT
  511. + tagStr("tag")
  512. + Dict(
  513. ZeroOrMore(
  514. Group(
  515. tagAttrName.set_parse_action(lambda t: t[0].lower())
  516. + Opt(Suppress("=") + tagAttrValue)
  517. )
  518. )
  519. )
  520. + Opt("/", default=[False])("empty").set_parse_action(
  521. lambda s, l, t: t[0] == "/"
  522. )
  523. + suppress_GT
  524. )
  525. closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)
  526. openTag.set_name("<%s>" % resname)
  527. # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
  528. openTag.add_parse_action(
  529. lambda t: t.__setitem__(
  530. "start" + "".join(resname.replace(":", " ").title().split()), t.copy()
  531. )
  532. )
  533. closeTag = closeTag(
  534. "end" + "".join(resname.replace(":", " ").title().split())
  535. ).set_name("</%s>" % resname)
  536. openTag.tag = resname
  537. closeTag.tag = resname
  538. openTag.tag_body = SkipTo(closeTag())
  539. return openTag, closeTag
  540. def make_html_tags(
  541. tag_str: Union[str, ParserElement]
  542. ) -> Tuple[ParserElement, ParserElement]:
  543. """Helper to construct opening and closing tag expressions for HTML,
  544. given a tag name. Matches tags in either upper or lower case,
  545. attributes with namespaces and with quoted or unquoted values.
  546. Example::
  547. text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
  548. # make_html_tags returns pyparsing expressions for the opening and
  549. # closing tags as a 2-tuple
  550. a, a_end = make_html_tags("A")
  551. link_expr = a + SkipTo(a_end)("link_text") + a_end
  552. for link in link_expr.search_string(text):
  553. # attributes in the <A> tag (like "href" shown here) are
  554. # also accessible as named results
  555. print(link.link_text, '->', link.href)
  556. prints::
  557. pyparsing -> https://github.com/pyparsing/pyparsing/wiki
  558. """
  559. return _makeTags(tag_str, False)
  560. def make_xml_tags(
  561. tag_str: Union[str, ParserElement]
  562. ) -> Tuple[ParserElement, ParserElement]:
  563. """Helper to construct opening and closing tag expressions for XML,
  564. given a tag name. Matches tags only in the given upper/lower case.
  565. Example: similar to :class:`make_html_tags`
  566. """
  567. return _makeTags(tag_str, True)
# convenience expressions matching any HTML open/close tag
any_open_tag: ParserElement
any_close_tag: ParserElement
any_open_tag, any_close_tag = make_html_tags(
    Word(alphas, alphanums + "_:").set_name("any tag")
)

# HTML5 entity names (trailing ';' stripped) mapped to their replacement characters
_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()}
# matches any common HTML entity (e.g. "&nbsp;"); the name is captured
# under the results name "entity"
common_html_entity = Regex("&(?P<entity>" + "|".join(_htmlEntityMap) + ");").set_name(
    "common HTML entity"
)
  577. def replace_html_entity(t):
  578. """Helper parser action to replace common HTML entities with their special characters"""
  579. return _htmlEntityMap.get(t.entity)
class OpAssoc(Enum):
    """Operator associativity indicator used in :class:`infix_notation` operator specs."""

    LEFT = 1
    RIGHT = 2
# an operator expression for infix_notation: an expression or string, or
# (for ternary operators) a pair of expressions/strings
InfixNotationOperatorArgType = Union[
    ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]]
]
# one precedence level in an infix_notation op_list:
# (op_expr, num_operands, associativity[, parse_action])
InfixNotationOperatorSpec = Union[
    Tuple[
        InfixNotationOperatorArgType,
        int,
        OpAssoc,
        typing.Optional[ParseAction],
    ],
    Tuple[
        InfixNotationOperatorArgType,
        int,
        OpAssoc,
    ],
]
  599. def infix_notation(
  600. base_expr: ParserElement,
  601. op_list: List[InfixNotationOperatorSpec],
  602. lpar: Union[str, ParserElement] = Suppress("("),
  603. rpar: Union[str, ParserElement] = Suppress(")"),
  604. ) -> ParserElement:
  605. """Helper method for constructing grammars of expressions made up of
  606. operators working in a precedence hierarchy. Operators may be unary
  607. or binary, left- or right-associative. Parse actions can also be
  608. attached to operator expressions. The generated parser will also
  609. recognize the use of parentheses to override operator precedences
  610. (see example below).
  611. Note: if you define a deep operator list, you may see performance
  612. issues when using infix_notation. See
  613. :class:`ParserElement.enable_packrat` for a mechanism to potentially
  614. improve your parser performance.
  615. Parameters:
  616. - ``base_expr`` - expression representing the most basic operand to
  617. be used in the expression
  618. - ``op_list`` - list of tuples, one for each operator precedence level
  619. in the expression grammar; each tuple is of the form ``(op_expr,
  620. num_operands, right_left_assoc, (optional)parse_action)``, where:
  621. - ``op_expr`` is the pyparsing expression for the operator; may also
  622. be a string, which will be converted to a Literal; if ``num_operands``
  623. is 3, ``op_expr`` is a tuple of two expressions, for the two
  624. operators separating the 3 terms
  625. - ``num_operands`` is the number of terms for this operator (must be 1,
  626. 2, or 3)
  627. - ``right_left_assoc`` is the indicator whether the operator is right
  628. or left associative, using the pyparsing-defined constants
  629. ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``.
  630. - ``parse_action`` is the parse action to be associated with
  631. expressions matching this operator expression (the parse action
  632. tuple member may be omitted); if the parse action is passed
  633. a tuple or list of functions, this is equivalent to calling
  634. ``set_parse_action(*fn)``
  635. (:class:`ParserElement.set_parse_action`)
  636. - ``lpar`` - expression for matching left-parentheses; if passed as a
  637. str, then will be parsed as Suppress(lpar). If lpar is passed as
  638. an expression (such as ``Literal('(')``), then it will be kept in
  639. the parsed results, and grouped with them. (default= ``Suppress('(')``)
  640. - ``rpar`` - expression for matching right-parentheses; if passed as a
  641. str, then will be parsed as Suppress(rpar). If rpar is passed as
  642. an expression (such as ``Literal(')')``), then it will be kept in
  643. the parsed results, and grouped with them. (default= ``Suppress(')')``)
  644. Example::
  645. # simple example of four-function arithmetic with ints and
  646. # variable names
  647. integer = pyparsing_common.signed_integer
  648. varname = pyparsing_common.identifier
  649. arith_expr = infix_notation(integer | varname,
  650. [
  651. ('-', 1, OpAssoc.RIGHT),
  652. (one_of('* /'), 2, OpAssoc.LEFT),
  653. (one_of('+ -'), 2, OpAssoc.LEFT),
  654. ])
  655. arith_expr.run_tests('''
  656. 5+3*6
  657. (5+3)*6
  658. -2--11
  659. ''', full_dump=False)
  660. prints::
  661. 5+3*6
  662. [[5, '+', [3, '*', 6]]]
  663. (5+3)*6
  664. [[[5, '+', 3], '*', 6]]
  665. -2--11
  666. [[['-', 2], '-', ['-', 11]]]
  667. """
  668. # captive version of FollowedBy that does not do parse actions or capture results names
  669. class _FB(FollowedBy):
  670. def parseImpl(self, instring, loc, doActions=True):
  671. self.expr.try_parse(instring, loc)
  672. return loc, []
  673. _FB.__name__ = "FollowedBy>"
  674. ret = Forward()
  675. if isinstance(lpar, str):
  676. lpar = Suppress(lpar)
  677. if isinstance(rpar, str):
  678. rpar = Suppress(rpar)
  679. # if lpar and rpar are not suppressed, wrap in group
  680. if not (isinstance(rpar, Suppress) and isinstance(rpar, Suppress)):
  681. lastExpr = base_expr | Group(lpar + ret + rpar)
  682. else:
  683. lastExpr = base_expr | (lpar + ret + rpar)
  684. for i, operDef in enumerate(op_list):
  685. opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]
  686. if isinstance(opExpr, str_type):
  687. opExpr = ParserElement._literalStringClass(opExpr)
  688. if arity == 3:
  689. if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2:
  690. raise ValueError(
  691. "if numterms=3, opExpr must be a tuple or list of two expressions"
  692. )
  693. opExpr1, opExpr2 = opExpr
  694. term_name = "{}{} term".format(opExpr1, opExpr2)
  695. else:
  696. term_name = "{} term".format(opExpr)
  697. if not 1 <= arity <= 3:
  698. raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
  699. if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT):
  700. raise ValueError("operator must indicate right or left associativity")
  701. thisExpr: Forward = Forward().set_name(term_name)
  702. if rightLeftAssoc is OpAssoc.LEFT:
  703. if arity == 1:
  704. matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...])
  705. elif arity == 2:
  706. if opExpr is not None:
  707. matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(
  708. lastExpr + (opExpr + lastExpr)[1, ...]
  709. )
  710. else:
  711. matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...])
  712. elif arity == 3:
  713. matchExpr = _FB(
  714. lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
  715. ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))
  716. elif rightLeftAssoc is OpAssoc.RIGHT:
  717. if arity == 1:
  718. # try to avoid LR with this extra test
  719. if not isinstance(opExpr, Opt):
  720. opExpr = Opt(opExpr)
  721. matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
  722. elif arity == 2:
  723. if opExpr is not None:
  724. matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(
  725. lastExpr + (opExpr + thisExpr)[1, ...]
  726. )
  727. else:
  728. matchExpr = _FB(lastExpr + thisExpr) + Group(
  729. lastExpr + thisExpr[1, ...]
  730. )
  731. elif arity == 3:
  732. matchExpr = _FB(
  733. lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
  734. ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
  735. if pa:
  736. if isinstance(pa, (tuple, list)):
  737. matchExpr.set_parse_action(*pa)
  738. else:
  739. matchExpr.set_parse_action(pa)
  740. thisExpr <<= (matchExpr | lastExpr).setName(term_name)
  741. lastExpr = thisExpr
  742. ret <<= lastExpr
  743. return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]):
    """
    (DEPRECATED - use :class:`IndentedBlock` class instead)
    Helper method for defining space-delimited indentation blocks,
    such as those used to define block statements in Python source code.

    Parameters:

    - ``blockStatementExpr`` - expression defining syntax of statement that
      is repeated within the indented block
    - ``indentStack`` - list created by caller to manage indentation stack
      (multiple ``statementWithIndentedBlock`` expressions within a single
      grammar should share a common ``indentStack``)
    - ``indent`` - boolean indicating whether block must be indented beyond
      the current level; set to ``False`` for block of left-most statements
      (default= ``True``)

    A valid block must contain at least one ``blockStatement``.

    (Note that indentedBlock uses internal parse actions which make it
    incompatible with packrat parsing.)

    Example::

        data = '''
        def A(z):
          A1
          B = 100
          G = A2
          A2
          A3
        B
        def BB(a,b,c):
          BB1
          def BBA():
            bba1
            bba2
            bba3
        C
        D
        def spam(x,y):
             def eggs(z):
                 pass
        '''

        indentStack = [1]
        stmt = Forward()

        identifier = Word(alphas, alphanums)
        funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":")
        func_body = indentedBlock(stmt, indentStack)
        funcDef = Group(funcDecl + func_body)

        rvalue = Forward()
        funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")")
        rvalue << (funcCall | identifier | Word(nums))
        assignment = Group(identifier + "=" + rvalue)
        stmt << (funcDef | assignment | identifier)

        module_body = stmt[1, ...]

        parseTree = module_body.parseString(data)
        parseTree.pprint()

    prints::

        [['def',
          'A',
          ['(', 'z', ')'],
          ':',
          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
         'B',
         ['def',
          'BB',
          ['(', 'a', 'b', 'c', ')'],
          ':',
          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
         'C',
         'D',
         ['def',
          'spam',
          ['(', 'x', 'y', ')'],
          ':',
          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
    """
    # NOTE(review): the mutable default for ``backup_stacks`` is deliberate
    # shared state - every call records a snapshot of the caller's indentStack
    # here so it can be restored on parse failure. Presumably concurrent use of
    # multiple indentedBlock grammars relies on LIFO push/pop ordering of this
    # shared list - confirm before changing to a per-call default.
    backup_stacks.append(indentStack[:])

    def reset_stack():
        # Restore the caller's indentStack to the snapshot taken when this
        # indentedBlock was constructed (invoked via the fail action below).
        indentStack[:] = backup_stacks[-1]

    def checkPeerIndent(s, l, t):
        # Parse action: the next statement must start at exactly the current
        # indentation level; deeper is "illegal nesting", shallower is
        # "not a peer entry". At end-of-string there is nothing to check.
        if l >= len(s):
            return
        curCol = col(l, s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseException(s, l, "illegal nesting")
            raise ParseException(s, l, "not a peer entry")

    def checkSubIndent(s, l, t):
        # Parse action: a new block must be indented deeper than the current
        # level; push the new column onto the stack when it is.
        curCol = col(l, s)
        if curCol > indentStack[-1]:
            indentStack.append(curCol)
        else:
            raise ParseException(s, l, "not a subentry")

    def checkUnindent(s, l, t):
        # Parse action: a dedent must return to some previously-seen
        # indentation column; pop one level when moving back out.
        if l >= len(s):
            return
        curCol = col(l, s)
        if not (indentStack and curCol in indentStack):
            raise ParseException(s, l, "not an unindent")
        if curCol < indentStack[-1]:
            indentStack.pop()

    # Newlines are significant here, so LineEnd must not be skipped as
    # ordinary whitespace - only tabs and spaces are skippable.
    NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
    INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT")
    PEER = Empty().set_parse_action(checkPeerIndent).set_name("")
    UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT")
    if indent:
        # Block must be indented beyond the enclosing level: INDENT pushes a
        # new level, one-or-more peer statements follow, UNDENT pops it.
        smExpr = Group(
            Opt(NL)
            + INDENT
            + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
            + UNDENT
        )
    else:
        # Left-most statements: no INDENT required, trailing UNDENT optional.
        smExpr = Group(
            Opt(NL)
            + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
            + Opt(UNDENT)
        )
    # add a parse action to remove backup_stack from list of backups
    smExpr.add_parse_action(
        lambda: backup_stacks.pop(-1) and None if backup_stacks else None
    )
    # On failure, roll the shared indentStack back to the recorded snapshot.
    smExpr.set_fail_action(lambda a, b, c, d: reset_stack())
    # Allow backslash-continued lines inside the block.
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr.set_name("indented block")
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
# (the bare strings after each assignment are Sphinx attribute docstrings)

# Regex body (?:[^*]|\*(?!/))* consumes everything up to - but not including -
# the closing */, which is then matched and Combine'd on without whitespace.
c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name(
    "C style comment"
)
"Comment of the form ``/* ... */``"

# [\s\S] matches any character including newlines; *? keeps it non-greedy so
# the match stops at the first -->.
html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
"Comment of the form ``<!-- ... -->``"

# Everything up to (not including) the next newline; leave_whitespace so
# leading spaces on the line are captured too.
rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")

# (?:\\\n|[^\n])* allows backslash-newline line continuations inside the comment.
dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
"Comment of the form ``// ... (to end of line)``"

# Same /* ... */ pattern as c_style_comment, rebuilt inline so this expression
# gets its own name, alternated with the // form.
cpp_style_comment = Combine(
    Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment
).set_name("C++ style comment")
"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"

java_style_comment = cpp_style_comment
"Same as :class:`cpp_style_comment`"

python_style_comment = Regex(r"#.*").set_name("Python style comment")
"Comment of the form ``# ... (to end of line)``"

# build list of built-in expressions, for future reference if a global default value
# gets updated
# NOTE: vars() is snapshotted here, so this must stay below all the module-level
# ParserElement definitions it is meant to collect.
_builtin_exprs: List[ParserElement] = [
    v for v in vars().values() if isinstance(v, ParserElement)
]
# pre-PEP8 compatible names
# These camelCase bindings are deprecated synonyms kept for backward
# compatibility with code written against older pyparsing releases; new code
# should use the snake_case names on the right-hand side.
delimitedList = delimited_list
countedArray = counted_array
matchPreviousLiteral = match_previous_literal
matchPreviousExpr = match_previous_expr
oneOf = one_of
dictOf = dict_of
originalTextFor = original_text_for
nestedExpr = nested_expr
makeHTMLTags = make_html_tags
makeXMLTags = make_xml_tags
anyOpenTag, anyCloseTag = any_open_tag, any_close_tag
commonHTMLEntity = common_html_entity
replaceHTMLEntity = replace_html_entity
opAssoc = OpAssoc
infixNotation = infix_notation
cStyleComment = c_style_comment
htmlComment = html_comment
restOfLine = rest_of_line
dblSlashComment = dbl_slash_comment
cppStyleComment = cpp_style_comment
javaStyleComment = java_style_comment
pythonStyleComment = python_style_comment