Fix parser bug: string values such as ")" were being confused with delimiter tokens.
Both Python and JS parsers used next_token() which returns plain strings
for both delimiter characters and string values, making them
indistinguishable. A string whose value is ")" or "(" would be
misinterpreted as a structural delimiter, causing parse errors.
Fix: use peek() (raw character) for all structural decisions in
parseExpr before consuming via next_token(). Also add enhanced error
logging to sx.js mount/loadComponents for easier future debugging.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -208,17 +208,25 @@ def parse_all(text: str) -> list[Any]:
|
||||
|
||||
|
||||
def _parse_expr(tok: Tokenizer) -> Any:
    """Parse and return the next expression from *tok*.

    Structural decisions (is the next thing a list, map, or atom?) are made
    by inspecting ``tok.peek()``, which returns the next *raw character* of
    input rather than a tokenized value. This matters because
    ``tok.next_token()`` returns plain strings for both delimiter characters
    and string values, making a string whose value is ``")"`` or ``"("``
    indistinguishable from an actual structural delimiter. Peeking at the
    raw character first avoids that ambiguity; the token is only consumed
    once we know what it is.

    Raises:
        ParseError: on end of input or on an unmatched closing delimiter.
    """
    # Use peek() (raw character) for structural decisions so that string
    # values like ")" or "(" don't get confused with actual delimiters.
    raw = tok.peek()
    if raw is None:
        raise ParseError("Unexpected end of input", tok.pos, tok.line, tok.col)
    if raw in ")]}":
        # A closing delimiter with no matching opener: consume it so the
        # reported position points at the offending character, then fail.
        tok.next_token()  # consume the delimiter
        raise ParseError(f"Unexpected {raw!r}", tok.pos, tok.line, tok.col)
    if raw == "(":
        tok.next_token()  # consume the '('
        return _parse_list(tok, ")")
    if raw == "[":
        tok.next_token()  # consume the '['
        return _parse_list(tok, "]")
    if raw == "{":
        tok.next_token()  # consume the '{'
        return _parse_map(tok)
    # Everything else: strings, keywords, symbols, numbers — the tokenizer
    # produces a finished value, so return it as-is.
    token = tok.next_token()
    return token
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user