#
# QAPI helper library
#
# Copyright IBM, Corp. 2011
# Copyright (c) 2013 Red Hat Inc.
#
# Authors:
#  Anthony Liguori <aliguori@us.ibm.com>
#  Markus Armbruster <armbru@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.

from ordereddict import OrderedDict
import sys

builtin_types = [
    'str', 'int', 'number', 'bool',
    'int8', 'int16', 'int32', 'int64',
    'uint8', 'uint16', 'uint32', 'uint64'
]

builtin_type_qtypes = {
    'str':      'QTYPE_QSTRING',
    'int':      'QTYPE_QINT',
    'number':   'QTYPE_QFLOAT',
    'bool':     'QTYPE_QBOOL',
    'int8':     'QTYPE_QINT',
    'int16':    'QTYPE_QINT',
    'int32':    'QTYPE_QINT',
    'int64':    'QTYPE_QINT',
    'uint8':    'QTYPE_QINT',
    'uint16':   'QTYPE_QINT',
    'uint32':   'QTYPE_QINT',
    'uint64':   'QTYPE_QINT',
}

# Schema parse error, carrying the source position where it occurred.
class QAPISchemaError(Exception):
    def __init__(self, schema, msg):
        self.fp = schema.fp
        self.msg = msg
        # Compute line and column from the character offset schema.pos.
        self.line = self.col = 1
        for ch in schema.src[0:schema.pos]:
            if ch == '\n':
                self.line += 1
                self.col = 1
            elif ch == '\t':
                self.col = (self.col + 7) % 8 + 1
            else:
                self.col += 1

    def __str__(self):
        return "%s:%s:%s: %s" % (self.fp.name, self.line, self.col, self.msg)
class QAPISchema:

    def __init__(self, fp):
        self.fp = fp
        self.src = fp.read()
        if self.src == '' or self.src[-1] != '\n':
            self.src += '\n'
        self.cursor = 0
        self.exprs = []
        self.accept()

        while self.tok != None:
            self.exprs.append(self.get_expr(False))

    # Single-token lookahead lexer: advance to the next token, leaving
    # it in self.tok (None at end of input), its source offset in
    # self.pos, and, for string tokens, the decoded value in self.val.
    # Comments ('#' to end of line) and whitespace are skipped.
    def accept(self):
        while True:
            self.tok = self.src[self.cursor]
            self.pos = self.cursor
            self.cursor += 1
            self.val = None

            if self.tok == '#':
                self.cursor = self.src.find('\n', self.cursor)
            elif self.tok in ['{', '}', ':', ',', '[', ']']:
                return
            elif self.tok == "'":
                string = ''
                esc = False
                while True:
                    ch = self.src[self.cursor]
                    self.cursor += 1
                    if ch == '\n':
                        raise QAPISchemaError(self,
                                              'Missing terminating "\'"')
                    if esc:
                        string += ch
                        esc = False
                    elif ch == "\\":
                        esc = True
                    elif ch == "'":
                        self.val = string
                        return
                    else:
                        string += ch
            elif self.tok == '\n':
                if self.cursor == len(self.src):
                    self.tok = None
                    return
            elif not self.tok.isspace():
                raise QAPISchemaError(self, 'Stray "%s"' % self.tok)
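
# For example (illustrative): lexing "{ 'data': [] }" produces the token
# sequence '{', "'" (with self.val == 'data'), ':', '[', ']', '}'.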

    def get_members(self):
        expr = OrderedDict()
        if self.tok == '}':
            self.accept()
            return expr
        if self.tok != "'":
            raise QAPISchemaError(self, 'Expected string or "}"')
        while True:
            key = self.val
            self.accept()
            if self.tok != ':':
                raise QAPISchemaError(self, 'Expected ":"')
            self.accept()
            expr[key] = self.get_expr(True)
            if self.tok == '}':
                self.accept()
                return expr
            if self.tok != ',':
                raise QAPISchemaError(self, 'Expected "," or "}"')
            self.accept()
            if self.tok != "'":
                raise QAPISchemaError(self, 'Expected string')

    def get_values(self):
        expr = []
        if self.tok == ']':
            self.accept()
            return expr
        if not self.tok in [ '{', '[', "'" ]:
            raise QAPISchemaError(self, 'Expected "{", "[", "]" or string')
        while True:
            expr.append(self.get_expr(True))
            if self.tok == ']':
                self.accept()
                return expr
            if self.tok != ',':
                raise QAPISchemaError(self, 'Expected "," or "]"')
            self.accept()

    def get_expr(self, nested):
        if self.tok != '{' and not nested:
            raise QAPISchemaError(self, 'Expected "{"')
        if self.tok == '{':
            self.accept()
            expr = self.get_members()
        elif self.tok == '[':
            self.accept()
            expr = self.get_values()
        elif self.tok == "'":
            expr = self.val
            self.accept()
        else:
            raise QAPISchemaError(self, 'Expected "{", "[" or string')
        return expr

def parse_schema(fp):
    try:
        schema = QAPISchema(fp)
    except QAPISchemaError, e:
        print >>sys.stderr, e
        exit(1)

    exprs = []

    for expr in schema.exprs:
        if expr.has_key('enum'):
            add_enum(expr['enum'])
        elif expr.has_key('union'):
            add_union(expr)
            add_enum('%sKind' % expr['union'])
        elif expr.has_key('type'):
            add_struct(expr)
        exprs.append(expr)

    return exprs
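
# For example (illustrative): feeding a file containing
#
#     { 'enum': 'Color', 'data': [ 'red', 'green', 'blue' ] }
#     { 'type': 'Point', 'data': { 'x': 'int', 'y': 'int' } }
#
# through parse_schema() returns the two expressions as OrderedDicts,
# recording 'Color' in enum_types and the 'Point' expression in
# struct_types along the way.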

def parse_args(typeinfo):
    if isinstance(typeinfo, basestring):
        struct = find_struct(typeinfo)
        assert struct != None
        typeinfo = struct['data']

    for member in typeinfo:
        argname = member
        argentry = typeinfo[member]
        optional = False
        structured = False
        if member.startswith('*'):
            argname = member[1:]
            optional = True
        if isinstance(argentry, OrderedDict):
            structured = True
        yield (argname, argentry, optional, structured)
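
# For example (illustrative): a member "'*offset': 'int'" yields
# ('offset', 'int', True, False): the leading '*' marks it optional,
# and a dict-valued entry would set the final 'structured' flag instead.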

def de_camel_case(name):
    new_name = ''
    for ch in name:
        if ch.isupper() and new_name:
            new_name += '_'
        if ch == '-':
            new_name += '_'
        else:
            new_name += ch.lower()
    return new_name
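
# For example: de_camel_case('VirtioNet') returns 'virtio_net'.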

def camel_case(name):
    new_name = ''
    first = True
    for ch in name:
        if ch in ['_', '-']:
            first = True
        elif first:
            new_name += ch.upper()
            first = False
        else:
            new_name += ch.lower()
    return new_name
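
# For example: camel_case('virtio-net') returns 'VirtioNet', and
# camel_case('BLOCK_IO_ERROR') returns 'BlockIoError'.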

def c_var(name, protect=True):
    # ANSI X3J11/88-090, 3.1.1
    c89_words = set(['auto', 'break', 'case', 'char', 'const', 'continue',
                     'default', 'do', 'double', 'else', 'enum', 'extern', 'float',
                     'for', 'goto', 'if', 'int', 'long', 'register', 'return',
                     'short', 'signed', 'sizeof', 'static', 'struct', 'switch',
                     'typedef', 'union', 'unsigned', 'void', 'volatile', 'while'])
    # ISO/IEC 9899:1999, 6.4.1
    c99_words = set(['inline', 'restrict', '_Bool', '_Complex', '_Imaginary'])
    # ISO/IEC 9899:2011, 6.4.1
    c11_words = set(['_Alignas', '_Alignof', '_Atomic', '_Generic', '_Noreturn',
                     '_Static_assert', '_Thread_local'])
    # GCC http://gcc.gnu.org/onlinedocs/gcc-4.7.1/gcc/C-Extensions.html
    # excluding _.*
    gcc_words = set(['asm', 'typeof'])
    # C++ ISO/IEC 14882:2003 2.11
    cpp_words = set(['bool', 'catch', 'class', 'const_cast', 'delete',
                     'dynamic_cast', 'explicit', 'false', 'friend', 'mutable',
                     'namespace', 'new', 'operator', 'private', 'protected',
                     'public', 'reinterpret_cast', 'static_cast', 'template',
                     'this', 'throw', 'true', 'try', 'typeid', 'typename',
                     'using', 'virtual', 'wchar_t',
                     # alternative representations
                     'and', 'and_eq', 'bitand', 'bitor', 'compl', 'not',
                     'not_eq', 'or', 'or_eq', 'xor', 'xor_eq'])
    # namespace pollution:
    polluted_words = set(['unix', 'errno'])
    if protect and (name in c89_words | c99_words | c11_words | gcc_words | cpp_words | polluted_words):
        return "q_" + name
    return name.replace('-', '_').lstrip("*")
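
# For example: c_var('unix') returns 'q_unix' (a polluted name), while
# c_var('block-device') returns 'block_device'.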

def c_fun(name, protect=True):
    return c_var(name, protect).replace('.', '_')

def c_list_type(name):
    return '%sList' % name

def type_name(name):
    if type(name) == list:
        return c_list_type(name[0])
    return name

enum_types = []
struct_types = []
union_types = []

def add_struct(definition):
    global struct_types
    struct_types.append(definition)

def find_struct(name):
    global struct_types
    for struct in struct_types:
        if struct['type'] == name:
            return struct
    return None

def add_union(definition):
    global union_types
    union_types.append(definition)

def find_union(name):
    global union_types
    for union in union_types:
        if union['union'] == name:
            return union
    return None

def add_enum(name):
    global enum_types
    enum_types.append(name)

def is_enum(name):
    global enum_types
    return (name in enum_types)

def c_type(name):
    if name == 'str':
        return 'char *'
    elif name == 'int':
        return 'int64_t'
    elif (name == 'int8' or name == 'int16' or name == 'int32' or
          name == 'int64' or name == 'uint8' or name == 'uint16' or
          name == 'uint32' or name == 'uint64'):
        return name + '_t'
    elif name == 'size':
        return 'uint64_t'
    elif name == 'bool':
        return 'bool'
    elif name == 'number':
        return 'double'
    elif type(name) == list:
        return '%s *' % c_list_type(name[0])
    elif is_enum(name):
        return name
    elif name == None or len(name) == 0:
        return 'void'
    elif name == name.upper():
        return '%sEvent *' % camel_case(name)
    else:
        return '%s *' % name
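
# For example: c_type('str') returns 'char *', c_type(['int']) returns
# 'intList *', and an all-caps event name like 'SHUTDOWN' maps to
# 'ShutdownEvent *'.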

def genindent(count):
    ret = ""
    for i in range(count):
        ret += " "
    return ret

indent_level = 0

def push_indent(indent_amount=4):
    global indent_level
    indent_level += indent_amount

def pop_indent(indent_amount=4):
    global indent_level
    indent_level -= indent_amount

def cgen(code, **kwds):
    indent = genindent(indent_level)
    lines = code.split('\n')
    lines = map(lambda x: indent + x, lines)
    return '\n'.join(lines) % kwds + '\n'

def mcgen(code, **kwds):
    return cgen('\n'.join(code.split('\n')[1:-1]), **kwds)
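
# For example (illustrative), with indent_level at 0:
#
#     mcgen('''
#     struct %(name)s;
#     ''', name='Foo')
#
# evaluates to 'struct Foo;\n': mcgen() drops the first and last line of
# the triple-quoted template, then cgen() indents each remaining line
# and interpolates the keyword arguments.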

def basename(filename):
    return filename.split("/")[-1]

def guardname(filename):
    guard = basename(filename).rsplit(".", 1)[0]
    for substr in [".", " ", "-"]:
        guard = guard.replace(substr, "_")
    return guard.upper() + '_H'
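
# For example: guardname('qapi-types.h') returns 'QAPI_TYPES_H'.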

def guardstart(name):
    return mcgen('''

#ifndef %(name)s
#define %(name)s

''',
                 name=guardname(name))

def guardend(name):
    return mcgen('''

#endif /* %(name)s */

''',
                 name=guardname(name))