From 68c4f3a6e17c5c1743a8fbb80d61d826bd96a7bd Mon Sep 17 00:00:00 2001 From: ismagom Date: Fri, 13 Nov 2015 12:42:43 +0100 Subject: [PATCH] Removed high-level modules and scripts --- COPYRIGHT | 26 - scripts/mod2aloe.py | 60 - scripts/mod2xml.py | 31 - scripts/module.py | 311 -- scripts/module.pyc | Bin 9291 -> 0 bytes scripts/module/__init__.py | 319 -- scripts/module/__init__.pyc | Bin 9701 -> 0 bytes scripts/module/pyclibrary/CLibrary.py | 501 --- scripts/module/pyclibrary/CParser.py | 1274 ------ scripts/module/pyclibrary/README.md | 8 - scripts/module/pyclibrary/__init__.py | 3 - scripts/module/pyclibrary/license.txt | 7 - scripts/module/pyclibrary/pyparsing.py | 3754 ----------------- scripts/xml2aloe/__init__.py | 117 - scripts/xml2aloe/__init__.pyc | Bin 4093 -> 0 bytes scripts/xml2aloe/template/CMakeLists.txt | 94 - scripts/xml2aloe/template/src/template.c | 100 - scripts/xml2aloe/template/src/template.h | 59 - .../xml2aloe/template/test/test_generate.c | 83 - srslte/include/srslte/channel/ch_awgn.h | 16 - srslte/include/srslte/fec/convcoder.h | 22 - srslte/include/srslte/fec/rm_conv.h | 22 - srslte/include/srslte/fec/rm_turbo.h | 21 - srslte/include/srslte/fec/viterbi.h | 23 - srslte/include/srslte/io/binsource.h | 18 - srslte/include/srslte/io/filesink.h | 17 - srslte/include/srslte/io/filesource.h | 20 - srslte/include/srslte/io/netsink.h | 18 - srslte/include/srslte/io/netsource.h | 20 - srslte/include/srslte/modem/demod_hard.h | 19 - srslte/include/srslte/modem/demod_soft.h | 25 - srslte/include/srslte/modem/mod.h | 18 - srslte/include/srslte/sync/pss.h | 28 - srslte/include/srslte/sync/sss.h | 27 - srslte/lib/channel/src/ch_awgn.c | 16 - srslte/lib/cuhd/src/cuhd_imp.cpp | 2 +- srslte/lib/fec/src/convcoder.c | 21 - srslte/lib/fec/src/rm_conv.c | 23 - srslte/lib/fec/src/rm_turbo.c | 17 - srslte/lib/fec/src/viterbi.c | 49 - srslte/lib/io/src/binsource.c | 44 - srslte/lib/io/src/filesink.c | 17 - srslte/lib/io/src/filesource.c | 17 - srslte/lib/io/src/netsink.c | 17 - srslte/lib/io/src/netsource.c | 17 - srslte/lib/modem/src/demod_hard.c | 18 - srslte/lib/modem/src/demod_soft.c | 16 - srslte/lib/modem/src/mod.c | 24 - srslte/lib/sync/src/sss.c | 27 - srslte/lib/ue/src/ue_ul.c | 2 +- 50 files changed, 2 insertions(+), 7386 deletions(-) delete mode 100644 scripts/mod2aloe.py delete mode 100644 scripts/mod2xml.py delete mode 100644 scripts/module.py delete mode 100644 scripts/module.pyc delete mode 100644 scripts/module/__init__.py delete mode 100644 scripts/module/__init__.pyc delete mode 100644 scripts/module/pyclibrary/CLibrary.py delete mode 100644 scripts/module/pyclibrary/CParser.py delete mode 100644 scripts/module/pyclibrary/README.md delete mode 100644 scripts/module/pyclibrary/__init__.py delete mode 100644 scripts/module/pyclibrary/license.txt delete mode 100644 scripts/module/pyclibrary/pyparsing.py delete mode 100644 scripts/xml2aloe/__init__.py delete mode 100644 scripts/xml2aloe/__init__.pyc delete mode 100644 scripts/xml2aloe/template/CMakeLists.txt delete mode 100644 scripts/xml2aloe/template/src/template.c delete mode 100644 scripts/xml2aloe/template/src/template.h delete mode 100644 scripts/xml2aloe/template/test/test_generate.c diff --git a/COPYRIGHT b/COPYRIGHT index e63620bd1..03aa32cad 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -3,32 +3,6 @@ Paul Sutton . All rights reserved. 
The following copyright notices are for libraries used within srsLTE: - ------------------------------------------------------------ -CLibrary.py ------------------------------------------------------------ - -Copyright (c) 2003-2011 Paul T. McGuire - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ----------------------------------------------------------- FEC Library - Version 3.0.1 - August 7th, 2007 ----------------------------------------------------------- diff --git a/scripts/mod2aloe.py b/scripts/mod2aloe.py deleted file mode 100644 index c222dce3f..000000000 --- a/scripts/mod2aloe.py +++ /dev/null @@ -1,60 +0,0 @@ -# -# Copyright 2012-2013 The libLTE Developers. See the -# COPYRIGHT file at the top-level directory of this distribution. -# -# This file is part of the libLTE library. -# -# libLTE is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as -# published by the Free Software Foundation, either version 3 of -# the License, or (at your option) any later version. -# -# libLTE is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# A copy of the GNU Lesser General Public License can be found in -# the LICENSE file in the top-level directory of this distribution -# and at http://www.gnu.org/licenses/. -# - - -#!/usr/bin/env python - -from module import Module -from xml2aloe import MakeModule -import sys, os, getopt - -argv = sys.argv -input_file = None -output_dir = None -try: - opts, args = getopt.getopt(argv,"hi:o:",["input_file=","output_dir="]) -except getopt.GetoptError: - print argv[0] + ' -i -o ' - sys.exit(2) -for opt, arg in opts: - if opt == '-h': - print argv[0] + ' -i -o ' - sys.exit() - elif opt in ("-i", "--input_file"): - input_file = arg - elif opt in ("-o", "--output_dir"): - output_dir = arg - -if input_file == None or output_dir == None: - print argv[0] + ' -i -o ' - sys.exit(2) - -filename=os.path.basename(input_file).split('.')[0] - -print filename + '\n' -print input_file + '\n' -print output_dir + '\n' - #m = Module("binsource") - - #m.readHeader(input_file) - #MakeModule(m,output_dir) - #print m.toString() - diff --git a/scripts/mod2xml.py b/scripts/mod2xml.py deleted file mode 100644 index 92bbb49c6..000000000 --- a/scripts/mod2xml.py +++ /dev/null @@ -1,31 +0,0 @@ -# -# Copyright 2012-2013 The libLTE Developers. 
See the -# COPYRIGHT file at the top-level directory of this distribution. -# -# This file is part of the libLTE library. -# -# libLTE is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as -# published by the Free Software Foundation, either version 3 of -# the License, or (at your option) any later version. -# -# libLTE is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# A copy of the GNU Lesser General Public License can be found in -# the LICENSE file in the top-level directory of this distribution -# and at http://www.gnu.org/licenses/. -# - - -#!/usr/bin/env python - -from module import Module -from xml2aloe import MakeModule - -m = Module("binsource") -m.readHeader('/home/ismael/work/osld-lib/scripts/binsource.h') -MakeModule(m) -print m.toString() \ No newline at end of file diff --git a/scripts/module.py b/scripts/module.py deleted file mode 100644 index a04c9fa5a..000000000 --- a/scripts/module.py +++ /dev/null @@ -1,311 +0,0 @@ -# -# Copyright 2012-2013 The libLTE Developers. See the -# COPYRIGHT file at the top-level directory of this distribution. -# -# This file is part of the libLTE library. -# -# libLTE is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as -# published by the Free Software Foundation, either version 3 of -# the License, or (at your option) any later version. -# -# libLTE is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# A copy of the GNU Lesser General Public License can be found in -# the LICENSE file in the top-level directory of this distribution -# and at http://www.gnu.org/licenses/. 
-# - - -from xml.dom.minidom import Document,parseString -from pyclibrary import CParser - -class Module: -# name - nof_inputs=0 - nof_outputs=0 - init_params = [] - input_params = [] - output_params = [] - - def __init__(self,name): - self.name=name - self.init_func=name+'_initialize' - self.work_func=name+'_work' - self.handler=name+'_hl' - self.handler_instance=name - self.init_pm_struct=name+'_init' - self.input_pm_struct=name+'_ctrl_in' - self.output_pm_struct=name+'_ctrl_out' - self.directory_name = 'lib_' + name - - def set_input(self,nof_inputs, input_type, input_size): - self.nof_inputs = nof_inputs - self.input_type = input_type - self.input_size = input_size - - def set_output(self,nof_outputs, output_type, output_size): - self.nof_outputs = nof_outputs - self.output_type = output_type - self.output_size = output_size - - def add_init_param(self,name,type,default): - p={'name':name,'variable':self.name+'.init.'+name,'type':type,'default':default} - self.init_params.append(p) - - def add_input_param(self,name,type,default): - p={'name':name,'variable':self.name+'.ctrl_in.'+name,'type':type,'default':default} - self.input_params.append(p) - - def add_output_param(self,name,type,default): - p={'name':name,'variable':self.name+'.ctrl_in.'+name,'type':type,'default':default} - self.output_params.append(p) - - def toString(self): - s = 'name: ' + self.name + '\n' - s = s + 'handler: ' + self.handler + '\n' - if self.nof_inputs > 0: - s = s + str(self.nof_inputs) + ' ' + self.input_type + ' inputs of size ' + str(self.output.size) + '\n' - else: - s = s + 'no inputs\n' - if self.nof_outputs > 0: - s = s + str(self.nof_outputs) + ' ' + self.output_type + ' outputs of size ' + str(self.output_size) + '\n' - else: - s = s + 'no outputs\n' - - if self.init_params: - s = s + 'Initialization parameters:\n' - for p in self.init_params: - s = s + ' - ' + p['type'] + ' ' + p['name'] + ' = ' + p['default'] + '\n' - - if self.input_params: - s = s + 'Input parameters:\n' - for p in self.input_params: - s = s + ' - ' + p['type'] + ' ' + p['name'] - if p['default'] == None: - s = s + ' (Mandatory)\n' - else: - s = s + ' = ' + p['default'] + '\n' - - if self.output_params: - s = s + 'Output parameters:\n' - for p in self.output_params: - s = s + ' - ' + p['type'] + ' ' + p['name'] + '\n' - return s - - def toXML(self): - root = Document() - - module = root.createElement('module') - root.appendChild(module) - - module.setAttribute("name",self.name) - module.setAttribute("handler",self.handler) - module.setAttribute("handler_instance",self.handler_instance) - - # Functions - functions = root.createElement("functions") - module.appendChild(functions) - functions.setAttribute("initialize",self.init_func) - functions.setAttribute("work",self.work_func) - - # Interfaces - inputs = root.createElement("inputs") - module.appendChild(inputs) - inputs.setAttribute("num",str(self.nof_inputs)) - inputs.setAttribute("type",self.input_type) - inputs.setAttribute("size",self.input_size) - - outputs = root.createElement("outputs") - module.appendChild(outputs) - outputs.setAttribute("num",str(self.nof_outputs)) - outputs.setAttribute("type",self.output_type) - outputs.setAttribute("size",self.output_size) - - # Init Parameters - pinit = root.createElement("init_parameters") - module.appendChild(pinit) - for p in self.init_params: - pi = root.createElement("param") - pinit.appendChild(pi) - pi.setAttribute("name",p['name']) - pi.setAttribute("var",p['variable']) - pi.setAttribute("type",p['type']) - 
pi.setAttribute("default",p['default']) - - pinput = root.createElement("input_parameters") - module.appendChild(pinput) - for p in self.input_params: - pi = root.createElement("param") - pinput.appendChild(pi) - pi.setAttribute("name",p['name']) - pi.setAttribute("var",p['variable']) - pi.setAttribute("type",p['type']) - pi.setAttribute("default",p['default']) - - poutput = root.createElement("output_parameters") - module.appendChild(poutput) - for p in self.input_params: - pi = root.createElement("param") - pinput.appendChild(pi) - pi.setAttribute("name",p['name']) - pi.setAttribute("var",p['variable']) - pi.setAttribute("type",p['type']) - - - return root.toprettyxml() - - def readXML(self, string): - root = parseString(string) - module = root.getElementsByTagName("module").item(0) - - self.name = module.getAttribute("name") - self.handler = module.getAttribute("handler") - self.handler_instance = module.getAttribute("handler_instance") - - functions = root.getElementsByTagName("functions").item(0) - self.init_func = functions.getAttribute("initialize") - self.work_func = functions.getAttribute("work") - - # Interfaces - inputs = root.getElementsByTagName("inputs").item(0) - self.nof_inputs = int(inputs.getAttribute("num")) - self.input_type = inputs.getAttribute("type") - self.input_size = inputs.getAttribute("size") - - outputs = root.getElementsByTagName("outputs").item(0) - self.nof_outputs = int(outputs.getAttribute("num")) - self.output_type = outputs.getAttribute("type") - self.output_size = outputs.getAttribute("size") - - pinit = root.getElementsByTagName("init_parameters").item(0) - for p in pinit.getElementsByTagName("params"): - self.init_params.appendChild({'name':p.getAttribute("name"),\ - 'variable':p.getAttribute("variable"), \ - 'type':p.getAttribute("type"),\ - 'default':p.getAttribute("default")}) - - pinput = root.getElementsByTagName("input_parameters").item(0) - for p in pinput.getElementsByTagName("params"): - self.input_params.appendChild({'name':p.getAttribute("name"),\ - 'variable':p.getAttribute("variable"),\ - 'type':p.getAttribute("type"),\ - 'default':p.getAttribute("default")}) - - poutput = root.getElementsByTagName("output_parameters").item(0) - for p in poutput.getElementsByTagName("params"): - self.output_params.appendChild({'name':p.getAttribute("name"),\ - 'variable':p.getAttribute("variable"),\ - 'type':p.getAttribute("type")}) - def findMember(self,members, name): - for m in members: - if m[0] == name: - return m - return None - - def findDefault(self, lines, variable): - for line in lines: - if variable in line: - if 'default' in line.lower(): - return str(int(line.split('=')[1].split('*/')[0])) - else: - return None - - def findSize(self, lines, variable): - for line in lines: - if variable in line: - if 'size' in line.lower(): - return line.split('=')[1].split('*/')[0] - else: - return None - - def findLinesStruct(self, lines, struct_name): - slines = [] - state = "nf" - for line in lines: - if state == 'nf': - if 'struct ' + struct_name in line: - state = "f1" - elif state == 'f1': - if '}' in line: - state = 'f2' - return slines - else: - slines.append(line) - - def findLinesHandler(self,file_name): - with open(file_name,'r') as f: - slines = [] - state = "nf" - for line in reversed(f.readlines()): - if state == 'nf': - if self.handler in line and '}' in line: - state = "f1" - elif state == 'f1': - if 'typedef' in line and 'struct' in line: - state = 'f2' - return reversed(slines) - else: - slines.append(line) - - def readHeader(self, 
file_name): - - p = CParser([file_name]) - h = p.defs['structs'][p.defs['types'][p.defs['types'][self.handler][0]][1]]['members'] - - input = self.findMember(h,'input') - if input == None: - self.nof_inputs = 0 - self.input_type = None - self.input_size = 0 - else: - self.nof_inputs = input[1][2][0] - self.input_type = input[1][0] - size = self.findSize(self.findLinesHandler(file_name), 'input') - if size == None: - size = '2048*20' - self.input_size = size - - output = self.findMember(h,'output') - if output == None: - self.nof_outputs = 0 - self.output_type = None - self.output_size = 0 - else: - self.nof_outputs = output[1][2][0] - self.output_type = output[1][0] - size = self.findSize(self.findLinesHandler(file_name), 'output') - if size == None: - size = '2048*20' - self.output_size = size - - initpm = p.defs['structs'].get(self.init_pm_struct) - if (initpm != None): - for m in initpm['members']: - default = self.findDefault(self.findLinesStruct(\ - self.findLinesHandler(file_name),\ - self.init_pm_struct), m[0]) - if default == None: - default = '0' - self.init_params.append({'name':m[0],'variable':self.name+'.init.'+m[0],\ - 'type':m[1][0],'default':default}) - - - inputpm = p.defs['structs'].get(self.input_pm_struct) - if (inputpm != None): - for m in inputpm['members']: - default = self.findDefault(self.findLinesStruct(\ - self.findLinesHandler(file_name),\ - self.input_pm_struct), m[0]) - self.input_params.append({'name':m[0],'variable':self.name+'.ctrl_in.'+m[0],\ - 'type':m[1][0],'default':default}) - - outputpm = p.defs['structs'].get(self.output_pm_struct) - if (outputpm != None): - for m in outputpm['members']: - self.output_params.append({'name':m[0],'variable':self.name+'.ctrl_out.'+m[0],\ - 'type':m[1][0]}) - \ No newline at end of file diff --git a/scripts/module.pyc b/scripts/module.pyc deleted file mode 100644 index 02ca6586f8b223d05f9f61fd7df8cb4e45541ec7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9291 zcmd5?OK@CA747$C^qJ9bMr>FbAez$Mm z+xOht-OBuXD*wy(ejQX@`Y(guXYq}nMp46m&o!KDdT!lwP2a8i&Lh=#jf`t%-Fg=7 zjH`{hTGsv8x%ZuWV|~QcM%-Fy3>@nJ!M~D^S~8AerCr@=hOH!-K%-NM;&3gA>aER^ z#~ys)xtB?el2O#Jv}-$!uqPV(T*5d01B%d5B8UQsSZ3TN%B;!}l@Q6JNb6%1mz8k` zQCZ2Q(yr$zDJc`20xOfQHm1sytBtEN?P@tyW?U_=O2O48RGD?PNmY)y+LS8CU2R&G z6RtL+%1IZjl?sr!j<2gy72WT1sK?tV_C2@nTv-(eJ;w$}n4fPAUyY14GTr`JQL{!J zQDej!qiT#=1M?qaTWp~N2iwpXtFl75-l`|{N~8XxFc|^0{7yT%m2er9Z#Im=9x`9K znnVq}kmUGSjrLBGWYKEWZNV zE8=gWwsf=I443M0vl2FzDCbf;Zq&|TT1#;?s&|rjsj20-*x55(NO9#d2aAu9nV0jX z^y&UqIUv6@xQzWEDt%+as%B_d6csJxHMzoSi3UjWC|d0ejD!isYN}dJ_B!D}Glua> z94I)!;uMQH7V{{EnJ5h!hlvTWzy|%2GtGHJ&yH5$?96_#R%2>Q&5zI1_K^wRgD1hg zEbc=w%mk@nPSD1AfUP4lq)*Tovfo8i>gINyTZYlLgoShTE?~>@K5Ur94-j$0EgzsK zx_E92md?8OJz?isi6g9P5o)kSuJc7Ls<$gqy>g=w%88@|I!Z0vsO&TntqHv*Rhf5G zqDnIsT2?xpuvIHjURws#U%(X?6j3CE&(1Kq;!3a7YPRkH-j(Jfpu}8v&O2l4;j7ga zm*ZlJL5l&c)t}#hb|&$_{6td5!NVZjQ858$F^pb_OQsjW5Eb(ckp2%9GX=3ykNTfz zhUiSq437EuA=PkE(!02b3f=;Bn})&x=NcT2 ze8EHH@io3l2U=~n!hq0YSjb#Js2>jvapPpwwp@J)_4q6b=goR2y&3PjejeqrpZ9ZK z-Ya-droRuMKIPw^wjT4Uv@I3MAR%WjLZgW`4d&$VZumaD*Ia{PQ_k$+XA3bEZF%(j z*oCAdbNLZBK(eR{H)k&dUogdUFhyVR0h0`y9)8Faxw8CT-Hn8F8Km(+4rqV_SQ`h6 z<_PNKkaNgx`9_|d#K#^0TtJ-|Bu$7kImj9futxrFtR<_r52PAoJ=J4v4~+G+@iCII zo*87Vw+=8vV=d_Ob`K;&0UjH<18ohcBk+P4<8FUcI?{x_{qe5iwToKN$amj|+m~67Ew_BIq zDBD}N)7zIN+x8z#4k(CQHM;Stz`!;j1DL_gLmMmYumEA*Jwk6sM_AB?jyb2{ra-2( z(==e?-bWkn6fo0W7ItlYG}JCoO}&dG)pD;Q-z6dsBwK>dgb-{D2iOSg13YmFQL-9^ zl_Y$=5%vxQEQ5XSX1!5Mn8P74znI{B;Koi89+`E(=Kct?11o1diWze1^H}l0w^+!%pJYv&WFn!wJ@__i=G@v{ 
z_rWf>z&_tYAyf{S&?Lm?%`i!UEWWh&YGrek=U>F$dJ;B;W$1BtEEBxqc+8%pcn?Wct`;>uK~DzifVibh4Uu8dH^*+1@xQuK3%UOJJiOIzmzk648`ul1gDkj_b8u(GkR_T3!Xa} zphm{6ZI3xy4aBxpEd-sZ1!vIipOOi(n_=??(tRxyrOM4=)WChnM!i+LBCa2z6*F;m zfaHQV?)B!=%YU(G-R^ap&ueLZiYeS0b|SC4~?Go)>5S%(VLLrWsLJ_wNZ@V6&6fvj;JVDBTdItuc9`LX1I>1nKf(z|AP7< zwLD;Ip^X;1){nK6To5)P@O2|5tm%g*55lc|xC;_6ff ze8`n)e5Jyza%%7e*i?K;huQHu_;Vl<>>LAmGRDEKTEeD7X^Esm{f+mUIBnoTBIp`{ zub61auCZFd3geq-_j#!xxGOc$a93)gO*)FEtKUPSA@@#+sc3dGaYQs5YEZPuJ1SZ) zLr0L`Li!Ck?U(|gULo#Q#zmL>-QTJSJ+EB{q?oZUzG~T`2G{E5KG$7i^J4%d$;?F5@$h=a15|k;-%`U(8of6VLp*4R?ebm99%&! z1p{rw>&$5sZ_b~6;yb6$pA8=8b7IR`WqAWPZWft)7i2ng?%ggaz*-C2`o$7KBOr63 z9iT8aNaIFbVgG2({yHBZ0JtNE5z6tg8wiL`!D}qI&$pvo0;KI)c!FZ-J=I`2zRo;O zuo<#=1chdHGeDN;0;MUMF2ReZ6`f|V$|pq>Qlxa$X$q+Aw4yU~#!HNU0pBqC7{l11 z1&$t;k^4{TqIl9jiIZZYEUu03KqU^(3Wrar-x=?r%peVI-`Z!q5(9I32hUrBa#>g5 z<+7cRmCI%d9hEd8g_oqzL&u|X9C^_X7{Qm+w{Cvy+ diff --git a/scripts/module/__init__.py b/scripts/module/__init__.py deleted file mode 100644 index c311402d4..000000000 --- a/scripts/module/__init__.py +++ /dev/null @@ -1,319 +0,0 @@ -# -# Copyright 2012-2013 The libLTE Developers. See the -# COPYRIGHT file at the top-level directory of this distribution. -# -# This file is part of the libLTE library. -# -# libLTE is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as -# published by the Free Software Foundation, either version 3 of -# the License, or (at your option) any later version. -# -# libLTE is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# A copy of the GNU Lesser General Public License can be found in -# the LICENSE file in the top-level directory of this distribution -# and at http://www.gnu.org/licenses/. 
-# - - -from xml.dom.minidom import Document,parseString -from pyclibrary import CParser - -class Module: -# name - nof_inputs=0 - nof_outputs=0 - init_params = [] - input_params = [] - output_params = [] - - def __init__(self,name): - self.name=name - self.init_func=name+'_initialize' - self.work_func=name+'_work' - self.handler=name+'_hl' - self.handler_instance=name - self.init_pm_struct=name+'_init' - self.input_pm_struct=name+'_ctrl_in' - self.output_pm_struct=name+'_ctrl_out' - self.directory_name = 'lib_' + name - self.library_name = 'osld' - - def set_input(self,nof_inputs, input_type, input_size): - self.nof_inputs = nof_inputs - self.input_type = input_type - self.input_size = input_size - - def set_output(self,nof_outputs, output_type, output_size): - self.nof_outputs = nof_outputs - self.output_type = output_type - self.output_size = output_size - - def add_init_param(self,name,type,default): - p={'name':name,'variable':self.name+'.init.'+name,'type':type,'default':default} - self.init_params.append(p) - - def add_input_param(self,name,type,default): - p={'name':name,'variable':self.name+'.ctrl_in.'+name,'type':type,'default':default} - self.input_params.append(p) - - def add_output_param(self,name,type,default): - p={'name':name,'variable':self.name+'.ctrl_in.'+name,'type':type,'default':default} - self.output_params.append(p) - - def toString(self): - s = 'name: ' + self.name + '\n' - s = s + 'handler: ' + self.handler + '\n' - s = s + 'directory: ' + self.directory_name + '\n' - s = s + 'library name: ' + self.library_name + '\n' - - if self.nof_inputs > 0: - s = s + str(self.nof_inputs) + ' ' + self.input_type + ' inputs of size ' + str(self.output.size) + '\n' - else: - s = s + 'no inputs\n' - if self.nof_outputs > 0: - s = s + str(self.nof_outputs) + ' ' + self.output_type + ' outputs of size ' + str(self.output_size) + '\n' - else: - s = s + 'no outputs\n' - - if self.init_params: - s = s + 'Initialization parameters:\n' - for p in self.init_params: - s = s + ' - ' + p['type'] + ' ' + p['name'] + ' = ' + p['default'] + '\n' - - if self.input_params: - s = s + 'Input parameters:\n' - for p in self.input_params: - s = s + ' - ' + p['type'] + ' ' + p['name'] - if p['default'] == None: - s = s + ' (Mandatory)\n' - else: - s = s + ' = ' + p['default'] + '\n' - - if self.output_params: - s = s + 'Output parameters:\n' - for p in self.output_params: - s = s + ' - ' + p['type'] + ' ' + p['name'] + '\n' - return s - - def toXML(self): - root = Document() - - module = root.createElement('module') - root.appendChild(module) - - module.setAttribute("name",self.name) - module.setAttribute("handler",self.handler) - module.setAttribute("handler_instance",self.handler_instance) - module.setAttribute("library",self.library_name) - module.setAttribute("directory_name",self.directory_name) - - # Functions - functions = root.createElement("functions") - module.appendChild(functions) - functions.setAttribute("initialize",self.init_func) - functions.setAttribute("work",self.work_func) - - # Interfaces - inputs = root.createElement("inputs") - module.appendChild(inputs) - inputs.setAttribute("num",str(self.nof_inputs)) - inputs.setAttribute("type",self.input_type) - inputs.setAttribute("size",self.input_size) - - outputs = root.createElement("outputs") - module.appendChild(outputs) - outputs.setAttribute("num",str(self.nof_outputs)) - outputs.setAttribute("type",self.output_type) - outputs.setAttribute("size",self.output_size) - - # Init Parameters - pinit = 
root.createElement("init_parameters") - module.appendChild(pinit) - for p in self.init_params: - pi = root.createElement("param") - pinit.appendChild(pi) - pi.setAttribute("name",p['name']) - pi.setAttribute("var",p['variable']) - pi.setAttribute("type",p['type']) - pi.setAttribute("default",p['default']) - - pinput = root.createElement("input_parameters") - module.appendChild(pinput) - for p in self.input_params: - pi = root.createElement("param") - pinput.appendChild(pi) - pi.setAttribute("name",p['name']) - pi.setAttribute("var",p['variable']) - pi.setAttribute("type",p['type']) - pi.setAttribute("default",p['default']) - - poutput = root.createElement("output_parameters") - module.appendChild(poutput) - for p in self.input_params: - pi = root.createElement("param") - pinput.appendChild(pi) - pi.setAttribute("name",p['name']) - pi.setAttribute("var",p['variable']) - pi.setAttribute("type",p['type']) - - - return root.toprettyxml() - - def readXML(self, string): - root = parseString(string) - module = root.getElementsByTagName("module").item(0) - - self.name = module.getAttribute("name") - self.handler = module.getAttribute("handler") - self.handler_instance = module.getAttribute("handler_instance") - self.directory_name = module.getAttribute("directory_name") - self.library_name = module.getAttribute("library") - - functions = root.getElementsByTagName("functions").item(0) - self.init_func = functions.getAttribute("initialize") - self.work_func = functions.getAttribute("work") - - # Interfaces - inputs = root.getElementsByTagName("inputs").item(0) - self.nof_inputs = int(inputs.getAttribute("num")) - self.input_type = inputs.getAttribute("type") - self.input_size = inputs.getAttribute("size") - - outputs = root.getElementsByTagName("outputs").item(0) - self.nof_outputs = int(outputs.getAttribute("num")) - self.output_type = outputs.getAttribute("type") - self.output_size = outputs.getAttribute("size") - - pinit = root.getElementsByTagName("init_parameters").item(0) - for p in pinit.getElementsByTagName("params"): - self.init_params.appendChild({'name':p.getAttribute("name"),\ - 'variable':p.getAttribute("variable"), \ - 'type':p.getAttribute("type"),\ - 'default':p.getAttribute("default")}) - - pinput = root.getElementsByTagName("input_parameters").item(0) - for p in pinput.getElementsByTagName("params"): - self.input_params.appendChild({'name':p.getAttribute("name"),\ - 'variable':p.getAttribute("variable"),\ - 'type':p.getAttribute("type"),\ - 'default':p.getAttribute("default")}) - - poutput = root.getElementsByTagName("output_parameters").item(0) - for p in poutput.getElementsByTagName("params"): - self.output_params.appendChild({'name':p.getAttribute("name"),\ - 'variable':p.getAttribute("variable"),\ - 'type':p.getAttribute("type")}) - def findMember(self,members, name): - for m in members: - if m[0] == name: - return m - return None - - def findDefault(self, lines, variable): - for line in lines: - if variable in line: - if 'default' in line.lower(): - return str(int(line.split('=')[1].split('*/')[0])) - else: - return None - - def findSize(self, lines, variable): - for line in lines: - if variable in line: - if 'size' in line.lower(): - return line.split('=')[1].split('*/')[0] - else: - return None - - def findLinesStruct(self, lines, struct_name): - slines = [] - state = "nf" - for line in lines: - if state == 'nf': - if 'struct ' + struct_name in line: - state = "f1" - elif state == 'f1': - if '}' in line: - state = 'f2' - return slines - else: - slines.append(line) - - def 
findLinesHandler(self,file_name): - with open(file_name,'r') as f: - slines = [] - state = "nf" - for line in reversed(f.readlines()): - if state == 'nf': - if self.handler in line and '}' in line: - state = "f1" - elif state == 'f1': - if 'typedef' in line and 'struct' in line: - state = 'f2' - return reversed(slines) - else: - slines.append(line) - - def readHeader(self, file_name): - - p = CParser([file_name]) - h = p.defs['structs'][p.defs['types'][p.defs['types'][self.handler][0]][1]]['members'] - - input = self.findMember(h,'input') - if input == None: - self.nof_inputs = 0 - self.input_type = None - self.input_size = 0 - else: - self.nof_inputs = input[1][2][0] - self.input_type = input[1][0] - size = self.findSize(self.findLinesHandler(file_name), 'input') - if size == None: - size = '2048*20' - self.input_size = size - - output = self.findMember(h,'output') - if output == None: - self.nof_outputs = 0 - self.output_type = None - self.output_size = 0 - else: - self.nof_outputs = output[1][2][0] - self.output_type = output[1][0] - size = self.findSize(self.findLinesHandler(file_name), 'output') - if size == None: - size = '2048*20' - self.output_size = size - - initpm = p.defs['structs'].get(self.init_pm_struct) - if (initpm != None): - for m in initpm['members']: - default = self.findDefault(self.findLinesStruct(\ - self.findLinesHandler(file_name),\ - self.init_pm_struct), m[0]) - if default == None: - default = '0' - self.init_params.append({'name':m[0],'variable':self.name+'.init.'+m[0],\ - 'type':m[1][0],'default':default}) - - - inputpm = p.defs['structs'].get(self.input_pm_struct) - if (inputpm != None): - for m in inputpm['members']: - default = self.findDefault(self.findLinesStruct(\ - self.findLinesHandler(file_name),\ - self.input_pm_struct), m[0]) - self.input_params.append({'name':m[0],'variable':self.name+'.ctrl_in.'+m[0],\ - 'type':m[1][0],'default':default}) - - outputpm = p.defs['structs'].get(self.output_pm_struct) - if (outputpm != None): - for m in outputpm['members']: - self.output_params.append({'name':m[0],'variable':self.name+'.ctrl_out.'+m[0],\ - 'type':m[1][0]}) - \ No newline at end of file diff --git a/scripts/module/__init__.pyc b/scripts/module/__init__.pyc deleted file mode 100644 index 40904f373e0a6a0de17bcb7600c2ab3a2f3947c8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9701 zcmd5?&2t>Z74Ml{ee8PmZCer;vIdi^DJd%zjya}MmC7ZRL+%Op1f7A@>F8!Co?{oOZ&!A}Fzvo)cwLN#)b8X*U_MJzn?^-$6&b!Nb zv~#X8>Kb|X9p^rB?ybust})`)D`Vi${3rfZeAJS06sz6(c024O$ssg)wI~kPlc?Fb zR`J+_FTC(7sZlbD`h{*|yA}3DW1n;Q#=k@nI!XjlAQ8))yM{8aazrIW@+i{fF^bE| zxPz#y6jEs~7bz(zhd2dRO0F@c%7kl-t1{^t1y!b8qo~TXYaCK##x+W+%(}*eDs!$e zsmfv3m{R443)U;skhqDjt5X%(_XX7B4^Z6l+%4y-szB&@HbBDsd~5h> z8+AmD5o?U9F=`FWe~fLhg$~9w7(s+!5=6b+YggkW+O8+blt{hpq&lGSUC6SR$afpfD6A*l=w?;2 zlt6;4Q7t9dBIOL?u(c8JaRpy1;wMmBzTRzz%gwl53tP(+e3?@`1t!aJJ!u2HQVXZY7#W7JVL-rfpl# zt-y?%!qR!}PGHl@Em$>=A0X<8TiHcVbn)CKY@c@@dBWcH3YEF06{^9OxGR>l(B7*> z&Dzygs4XXLa9cIPjoNlA(JIkv(wXh0T2yPtLfcxe7j_yI%4-XQTOc^j;vp0X;kS1n zO5--DH5#@l;00>!6QIOglFmD2E98q+8yDzOieXDZ8Lh!Yhtxz3*cOtMaX>lms-fT! 
zKE?NQV+L`}6eJj- zbgjGQWX-Q)pA8eutzzd56V1JD;)U#W?9V)7hZ}hp{e)-@PstIqUFYF=>2){ZiQ#tV z(`RFDbDX<5W6~+gT^CFsbcb&?j(;2CSg==8jzzqUZ_4q|9oQY}vn#piya&+`tZ>RZ z=OkgttzLQD?OJFt0Xcy^lWx&uMtzF)X`|1uZVKC)^e%SbmmubZi+(Gxr!=xiE)FR6b926fS8dCrv zH)0c#1LCZMW9-^@y1~P2eg%bktaUqKa1t#8UB)BwGx5P;={R|I?U->I_4siV&YSU$ z@Z(Q;Px(b}%3JY^e!(kx)7}&5?}MmM_z$G5lU|*xg4UsH5}>>YMJGD*7?1!#fG^nC zwyEf~eKtM(05h=iQe?m(DkUkF+Z=Hp4M^tnM~6uUDPzMVMEKZ&fDyik2mwRmIfe#D zjE13dHgrxyE9M8TsJoF0;!qssfCe}~0OMdG2OLrW;J6vSgFcmA62!;Vlx zfI}!%Y9K{hA&?G_Iz<8yv$IbB_)3?r(<>w8^f-jQ>N?wPgUO|gX9~aRNfIa10k-PM ztENB>p|u5y3Wh1vP-G}DP6+76aEOh(OMoXXA?DVju$F}1XodYWh{fG6TyM4-iQqd- zo==b!xw@T%`_y2#D+DI5fIcW-)U)ppcX!YwekI*r6eh{dop!6jwxWH(an$W5!C8_B zlEK$lJcYtk!2^SEIpGPl*Oa+>(t_;Mv$-Eg1Wb%gx|c4zypBqI6$MU@0P6{V&PzWz zv{={gB5Qo^sV3SqV12?ntj~g;eHLXQSLd|$0Q{dp0l;G~6Ey_qeT)ah0?_GB!k$1Fqi7eC zV*48JDmEOvJ0cv<@D=joKl=mKGK5VjfLfynyV++llShy{;5jOufK>Pia}UOIN;nH5 zyn|7uGfJN)5Z2}>Xddt!b34}vs+>OH-UA@LDddI=nsu9VI!7BwOYb^9TYHw>0op4m zuxoV4khSOZe9o<)o455{fVD{=upUTIkOTm{J4kT&ASEaP3QYn*@<4*3+qvj&6^A8w zG1I{jNr1KYAL>AJ{+@pkfp-eK+VKir_rr~l=BwBhh6hZ71uHCs3{SFloCV=jKoLBR zn&E;H3j@Rqm`@I#MIo#Y7)&MPe=ST>G>p&Pe7$yUjfp}6cryvx0z~wSc1SVnu0Nt6?4hwZC4m}n&_lvn2Bb7w?&blPZk z8W+U=H)zES>>U7p+8g)!6YIz3Sl({uicRc|G%5zX-WYY!C1n8swF=!yd7GyWc(oLN z1?#|*MdFzt9ND-!Gys!sWRW%gIeC3M<(&6Ih5 zuQH(xAp5W~&#MW{9HXYmG$y}AnCB(2QiB8mf1 z5my#fw2t%Z?@`~Qp9f7pl)A5<57D^hs2C^%=ck?PrmJ^#;z54M({6fRM?hZbjA$Xo zypKZnt=Mhh0D*p3pxAY|M1qI6K6Pe-c_hoZbyG7Q2*c<2lI=^WP*GWDBPM*<{YY7) zjWgPZyVf{sJNE=SDUWQaUMf*ZVSCYTNXRwjx{Gb4CJ0;)PRUFrP{}WI036S8pYsO} z$diEcM)Bc#oW_@*Q7`W+KBG{ykI?H^s3h5~8BT5A|Cmu)Y6)MK_#wA8y(?BD_B(NI zH?xXi+@=mZaHOX2PiTcfJud@&;aQ(fVyEr z=OQ`}+08OGdu?qer)DcOBNMGQ0$e@1;9V96wuyK`>f84vyV(0HXff}b9Ds%IT2%?!~;_X~!;%m1>;q+1r&V2c+tcLgy1Q>B~5!(m_u$wVD=qRy8m0IDJ z!iKZ# z6i_Of4K*xU@Q=_;DQ_o;`RHYijMUYlX0&+9VljgyS7%m9x_WT(XWG_KD~ffV)% z8BV3q{Vy!dyMG#%mw~3FR!-wnf9JbuY8oHjYZ_p%e;SY&LHS)IW;WZVaW9D@U-|X@ z5+5aa?ZT9>Kb8h0-pcK~p{6A69}MOt1D42bTATSy8H7+LavU53tXG&gUK0!%^q3bQ zB*ltr${}R4o^mhyuzk|Xd>#>4ULrFL9CI1N0-h^A+!{3EUK%NmGA636o-tpWp4Q=1r zr#v0x(!X!u4M?@BKQgFRErnODnoW*DjzQXAsto2yWpReZODtYt zAsfq5GOEla%WmT--x@0Z`#;nO;ArG_VzM0d>7MlQ3#GycK6!l}E<9IwtT0*_$7ixI zwPxef;>sZ+r?%ASwwKzlKYl5?v$JjywcgEomc-;*){qA_0a@bfru%0(8C?w>^``xQ E0ozgJ#Q*>R diff --git a/scripts/module/pyclibrary/CLibrary.py b/scripts/module/pyclibrary/CLibrary.py deleted file mode 100644 index 17424bc4e..000000000 --- a/scripts/module/pyclibrary/CLibrary.py +++ /dev/null @@ -1,501 +0,0 @@ -# -*- coding: utf-8 -*- -""" -CLibrary.py - Provides CLibrary class -Copyright 2010 Luke Campagnola -Distributed under MIT/X11 license. See license.txt for more infomation. - -Proxy to both CHeader and ctypes, allowing automatic type conversion and -function calling based on C header definitions. -""" - - -from ctypes import * -import sys - - -class CLibrary: - """The CLibrary class is intended to automate much of the work in using ctypes by integrating - header file definitions from CParser. Ths class serves as a proxy to a ctypes, adding - a few features: - - allows easy access to values defined via CParser - - automatic type conversions for function calls using CParser function signatures - - creates ctype classes based on type definitions from CParser - - Initialize using a ctypes shared object and a CParser: - headers = CParser.winDefs() - lib = CLibrary(windll.User32, headers) - - There are 3 ways to access library elements: - lib(type, name) - type can be one of 'values', 'functions', 'types', 'structs', 'unions', or 'enums'. - Returns an object matching name. 
For values, the value from the headers is - returned. For functions, a callable object is returned that handles automatic - type conversion for arguments and return values. for structs, types, and enums, - a ctypes class is returned matching the type specified. - lib.name - searches in order through values, functions, types, structs, unions, and enums from - header definitions and returns an object for the first match found. The object - returned is the same as returned by lib(type, name). This is the preferred way to access - elements from CLibrary, but may not work in some situations (for example, if - a struct and variable share the same name). - lib[type] - Accesses the header definitions directly, returns definition dictionaries - based on the type requested. This is equivalent to headers.defs[type]. - """ - Null = object() - - cTypes = { - 'char': c_char, - 'wchar': c_wchar, - 'unsigned char': c_ubyte, - 'short': c_short, - 'short int': c_short, - 'unsigned short': c_ushort, - 'unsigned short int': c_ushort, - 'int': c_int, - 'unsigned': c_uint, - 'unsigned int': c_uint, - 'long': c_long, - 'long int': c_long, - 'unsigned long': c_ulong, - 'unsigned long int': c_ulong, - '__int64': c_longlong, - 'long long': c_longlong, - 'long long int': c_longlong, - 'unsigned __int64': c_ulonglong, - 'unsigned long long': c_ulonglong, - 'unsigned long long int': c_ulonglong, - 'float': c_float, - 'double': c_double, - 'long double': c_longdouble - } - cPtrTypes = { - 'char': c_char_p, - 'wchar': c_wchar_p, - 'void': c_void_p - } - - - - def __init__(self, lib, headers, prefix=None): - ## name everything using underscores to avoid name collisions with library - - self._lib_ = lib - self._headers_ = headers - self._defs_ = headers.defs - if prefix is None: - self._prefix_ = [] - elif type(prefix) is list: - self._prefix_ = prefix - else: - self._prefix_ = [prefix] - self._objs_ = {} - for k in ['values', 'functions', 'types', 'structs', 'unions', 'enums']: - self._objs_[k] = {} - self._allObjs_ = {} - self._structs_ = {} - self._unions_ = {} - - def __call__(self, typ, name): - if typ not in self._objs_: - typs = self._objs_.keys() - raise Exception("Type must be one of %s" % str(typs)) - - if name not in self._objs_[typ]: - self._objs_[typ][name] = self._mkObj_(typ, name) - - return self._objs_[typ][name] - - def _allNames_(self, name): - return [name] + [p + name for p in self._prefix_] - - def _mkObj_(self, typ, name): - names = self._allNames_(name) - - for n in names: - if n in self._objs_: - return self._objs_[n] - - for n in names: ## try with and without prefix - if n not in self._defs_[typ] and not (typ in ['structs', 'unions', 'enums'] and n in self._defs_['types']): - continue - - if typ == 'values': - return self._defs_[typ][n] - elif typ == 'functions': - return self._getFunction(n) - elif typ == 'types': - obj = self._defs_[typ][n] - return self._ctype(obj) - elif typ == 'structs': - return self._cstruct('structs', n) - elif typ == 'unions': - return self._cstruct('unions', n) - elif typ == 'enums': - ## Allow automatic resolving of typedefs that alias enums - if n not in self._defs_['enums']: - if n not in self._defs_['types']: - raise Exception('No enums named "%s"' % n) - typ = self._headers_.evalType([n])[0] - if typ[:5] != 'enum ': - raise Exception('No enums named "%s"' % n) - n = self._defs_['types'][typ][1] ## look up internal name of enum - obj = self._defs_['enums'][n] - - return obj - else: - raise Exception("Unknown type %s" % typ) - raise NameError(name) - - - def 
__getattr__(self, name): - """Used to retrieve any type of definition from the headers. Searches for the name in this order: - values, functions, types, structs, unions, enums.""" - if name not in self._allObjs_: - names = self._allNames_(name) - for k in ['values', 'functions', 'types', 'structs', 'unions', 'enums', None]: - if k is None: - raise NameError(name) - obj = None - for n in names: - if n in self._defs_[k]: - obj = self(k, n) - break - if obj is not None: - break - self._allObjs_[name] = obj - return self._allObjs_[name] - - def __getitem__(self, name): - """Used to retrieve a specific dictionary from the headers.""" - return self._defs_[name] - - def __repr__(self): - return "" % str(self._lib_) - - def _getFunction(self, funcName): - try: - func = getattr(self._lib_, funcName) - except: - raise Exception("Function name '%s' appears in headers but not in library!" % func) - - #print "create function %s," % (funcName), self._defs_['functions'][funcName] - return CFunction(self, func, self._defs_['functions'][funcName], funcName) - - def _ctype(self, typ, pointers=True): - """return a ctype object representing the named type. - If pointers is True, the class returned includes all pointer/array specs provided. - Otherwise, the class returned is just the base type with no pointers.""" - try: - typ = self._headers_.evalType(typ) - mods = typ[1:][:] - - ## Create the initial type - ## Some types like ['char', '*'] have a specific ctype (c_char_p) - ## (but only do this if pointers == True) - if pointers and len(typ) > 1 and typ[1] == '*' and typ[0] in CLibrary.cPtrTypes: - cls = CLibrary.cPtrTypes[typ[0]] - mods = typ[2:] - - ## If the base type is in the list of existing ctypes: - elif typ[0] in CLibrary.cTypes: - cls = CLibrary.cTypes[typ[0]] - - ## structs, unions, enums: - elif typ[0][:7] == 'struct ': - cls = self._cstruct('structs', self._defs_['types'][typ[0]][1]) - elif typ[0][:6] == 'union ': - cls = self._cstruct('unions', self._defs_['types'][typ[0]][1]) - elif typ[0][:5] == 'enum ': - cls = c_int - - ## void - elif typ[0] == 'void': - cls = None - else: - #print typ - raise Exception("Can't find base type for %s" % str(typ)) - - if not pointers: - return cls - - ## apply pointers and arrays - while len(mods) > 0: - m = mods.pop(0) - if isinstance(m, basestring): ## pointer or reference - if m[0] == '*' or m[0] == '&': - for i in m: - cls = POINTER(cls) - elif type(m) is list: ## array - for i in m: - if i == -1: ## -1 indicates an 'incomplete type' like "int variable[]" - cls = POINTER(cls) ## which we should interpret like "int *variable" - else: - cls = cls * i - elif type(m) is tuple: ## Probably a function pointer - ## Find pointer and calling convention - isPtr = False - conv = '__cdecl' - if len(mods) == 0: - raise Exception("Function signature with no pointer:", m, mods) - for i in [0,1]: - if len(mods) < 1: - break - if mods[0] == '*': - mods.pop(0) - isPtr = True - elif mods[0] in ['__stdcall', '__cdecl']: - conv = mods.pop(0) - else: - break - if not isPtr: - raise Exception("Not sure how to handle type (function without single pointer): %s" % str(typ)) - - if conv == '__stdcall': - mkfn = WINFUNCTYPE - else: - mkfn = CFUNCTYPE - #print "Create function pointer (%s)" % conv - - args = [self._ctype(arg[1]) for arg in m] - cls = mkfn(cls, *args) - - else: - raise Exception("Not sure what to do with this type modifier: '%s'" % str(p)) - return cls - except: - print "Error while processing type", typ - raise - - def _cstruct(self, strType, strName): - if strName 
not in self._structs_: - - ## Resolve struct name--typedef aliases allowed. - if strName not in self._defs_[strType]: - if strName not in self._defs_['types']: - raise Exception('No struct/union named "%s"' % strName) - typ = self._headers_.evalType([strName])[0] - if typ[:7] != 'struct ' and typ[:6] != 'union ': - raise Exception('No struct/union named "%s"' % strName) - strName = self._defs_['types'][typ][1] - - ## Pull struct definition - defn = self._defs_[strType][strName] - - - ## create ctypes class - defs = defn['members'][:] - if strType == 'structs': - class s(Structure): - def __repr__(self): - return "" % strName - elif strType == 'unions': - class s(Union): - def __repr__(self): - return "" % strName - - - ## must register struct here to allow recursive definitions. - self._structs_[strName] = s - - if defn['pack'] is not None: - s._pack_ = defn['pack'] - - ## assign names to anonymous members - members = [] - anon = [] - for i in range(len(defs)): - if defs[i][0] is None: - c = 0 - while True: - name = 'anon_member%d' % c - if name not in members: - defs[i][0] = name - anon.append(name) - break - members.append(defs[i][0]) - - s._anonymous_ = anon - s._fields_ = [(m[0], self._ctype(m[1])) for m in defs] - s._defaults_ = [m[2] for m in defs] - return self._structs_[strName] - - - -class CFunction: - def __init__(self, lib, func, sig, name): - self.lib = lib - self.func = func - #print sig - self.sig = list(sig) # looks like [return_type, [(argName, type, default), (argName, type, default), ...]] - self.sig[1] = [s for s in sig[1] if s[1] != ['void']] ## remove void args from list - for conv in ['__stdcall', '__cdecl']: - if conv in self.sig[0]: - self.sig[0].remove(conv) - self.name = name - self.restype = lib._ctype(self.sig[0]) - #func.restype = self.restype - self.argTypes = [lib._ctype(s[1]) for s in self.sig[1]] - func.argtypes = self.argTypes - self.reqArgs = [x[0] for x in self.sig[1] if x[2] is None] - self.argInds = dict([(self.sig[1][i][0], i) for i in range(len(self.sig[1]))]) ## mapping from argument names to indices - #print "created func", self, sig, self.argTypes - - def argCType(self, arg): - """Return the ctype required for the specified argument. - arg can be either an integer or the name of the argument. - """ - if isinstance(arg, basestring): - arg = self.argInds[arg] - return self.lib._ctype(self.sig[1][arg][1]) - - def __call__(self, *args, **kwargs): - """Invoke the SO or dll function referenced, converting all arguments to the correct type. - Keyword arguments are allowed as long as the header specifies the argument names. - Arguments which are passed byref may be omitted entirely, and will be automaticaly generated. - To pass a NULL pointer, give None as the argument. - Returns the return value of the function call as well as all of the arguments (so that objects passed by reference can be retrieved)""" - #print "CALL: %s(%s)" % (self.name, ", ".join(map(str, args) + ["%s=%s" % (k, str(kwargs[k])) for k in kwargs])) - #print " sig:", self.sig - argList = [None] * max(len(self.reqArgs), len(args)) ## We'll need at least this many arguments. 
- - ## First fill in args - for i in range(len(args)): - #argList[i] = self.argTypes[i](args[i]) - if args[i] is None: - argList[i] = self.lib.Null - else: - argList[i] = args[i] - - ## Next fill in kwargs - for k in kwargs: - #print " kw:", k - if k not in self.argInds: - print "Function signature:", self.prettySignature() - raise Exception("Function signature has no argument named '%s'" % k) - ind = self.argInds[k] - if ind >= len(argList): ## stretch argument list if needed - argList += [None] * (ind - len(argList) + 1) - #argList[ind] = self.coerce(kwargs[k], self.argTypes[ind]) - if kwargs[k] is None: - argList[ind] = self.lib.Null - else: - argList[ind] = kwargs[k] - - guessedArgs = [] - ## Finally, fill in remaining arguments if they are pointers to int/float/void*/struct values - ## (we assume these are to be modified by the function and their initial value is not important) - for i in range(len(argList)): - if argList[i] is None or argList[i] is self.lib.Null: - try: - sig = self.sig[1][i][1] - argType = self.lib._headers_.evalType(sig) - if argList[i] is self.lib.Null: ## request to build a null pointer - if len(argType) < 2: - raise Exception("Can not create NULL for non-pointer argument type: %s" % str(argType)) - argList[i] = self.lib._ctype(sig)() - #elif argType == ['char', '*']: ## pass null pointer if none was specified. This is a little dangerous, but some functions will expect it. - #argList[i] = c_char_p() ## On second thought: let's just require the user to explicitly ask for a NULL pointer. - else: - if argType == ['void', '**'] or argType == ['void', '*', '*']: - cls = c_void_p - else: - assert len(argType) == 2 and argType[1] == '*' ## Must be 2-part type, second part must be '*' - cls = self.lib._ctype(sig, pointers=False) - argList[i] = pointer(cls(0)) - guessedArgs.append(i) - except: - if sys.exc_info()[0] is not AssertionError: - raise - #sys.excepthook(*sys.exc_info()) - print "Function signature:", self.prettySignature() - raise Exception("Function call '%s' missing required argument %d '%s'. (See above for signature)" % (self.name, i, self.sig[1][i][0])) - #print " args:", argList - try: - res = self.func(*argList) - except: - print "Function call failed. Signature is:", self.prettySignature() - print "Arguments:", argList - print "Argtypes:", self.func.argtypes - raise - #print " result:", res - - cr = CallResult(res, argList, self.sig, guessed=guessedArgs) - return cr - - def prettySignature(self): - return "%s %s(%s)" % (''.join(self.sig[0]), self.name, ', '.join(["%s %s" % ("".join(map(str, s[1])), s[0]) for s in self.sig[1]])) - -class CallResult: - """Class for bundling results from C function calls. Allows access to the function - return value as well as all of the arguments, since the function call will often return - extra values via these arguments. - - Original ctype objects can be accessed via result.rval or result.args - - Python values carried by these objects can be accessed using () - To access values: - - The return value: () - - The nth argument passed: [n] - - The argument by name: ['name'] - - All values that were auto-generated: .auto() - - The class can also be used as an iterator, so that tuple unpacking is possible: - ret, arg1, arg2 = lib.runSomeFunction(...) 
- """ - def __init__(self, rval, args, sig, guessed): - self.rval = rval ## return value of function call - self.args = args ## list of arguments to function call - self.sig = sig ## function signature - self.guessed = guessed ## list of arguments that were generated automatically (usually byrefs) - - def __call__(self): - #print "Clibrary:", type(self.rval), self.mkVal(self.rval) - if self.sig[0] == ['void']: - return None - return self.mkVal(self.rval) - - def __getitem__(self, n): - if type(n) is int: - return self.mkVal(self.args[n]) - elif type(n) is str: - ind = self.findArg(n) - return self.mkVal(self.args[ind]) - else: - raise Exception("Index must be int or str.") - - def __setitem__(self, n, val): - if type(n) is int: - self.args[n] = val - elif type(n) is str: - ind = self.findArg(n) - self.args[ind] = val - else: - raise Exception("Index must be int or str.") - - - def mkVal(self, obj): - while not hasattr(obj, 'value'): - if not hasattr(obj, 'contents'): - return obj - try: - obj = obj.contents - except ValueError: - return None - - return obj.value - - - def findArg(self, arg): - for i in range(len(self.sig[1])): - if self.sig[1][i][0] == arg: - return i - raise Exception("Can't find argument '%s' in function signature. Arguments are: %s" % (arg, str([a[0] for a in self.sig[1]]))) - - def __iter__(self): - yield self() - for i in range(len(self.args)): - yield(self[i]) - - def auto(self): - return [self[n] for n in self.guessed] - - - - - diff --git a/scripts/module/pyclibrary/CParser.py b/scripts/module/pyclibrary/CParser.py deleted file mode 100644 index df2b4dea6..000000000 --- a/scripts/module/pyclibrary/CParser.py +++ /dev/null @@ -1,1274 +0,0 @@ -# -*- coding: utf-8 -*- -""" -CParser.py - C parsing library -Copyright 2010 Luke Campagnola -Distributed under MIT/X11 license. See license.txt for more infomation. - -Used for extracting data such as macro definitions, variables, typedefs, and function -signatures from C files (preferrably header files). -""" - -import sys, re, os - -__all__ = ['winDefs', 'CParser'] - - -def winDefs(verbose=False): - """Convenience function. Returns a parser which loads a selection of windows headers included with - CParser. These definitions can either be accessed directly or included before parsing - another file like this: - windefs = CParser.winDefs() - p = CParser.CParser("headerFile.h", copyFrom=windefs) - Definitions are pulled from a selection of header files included in Visual Studio - (possibly not legal to distribute? Who knows.), some of which have been abridged - because they take so long to parse. - """ - headerFiles = ['WinNt.h', 'WinDef.h', 'WinBase.h', 'BaseTsd.h', 'WTypes.h', 'WinUser.h'] - d = os.path.dirname(__file__) - p = CParser( - [os.path.join(d, 'headers', h) for h in headerFiles], - types={'__int64': ('long long')}, - macros={'_WIN32': '', '_MSC_VER': '800', 'CONST': 'const', 'NO_STRICT': None}, - processAll=False - ) - p.processAll(cache=os.path.join(d, 'headers', 'WinDefs.cache'), noCacheWarning=True, verbose=verbose) - return p - - -class CParser(): - """Class for parsing C code to extract variable, struct, enum, and function declarations as well as preprocessor macros. This is not a complete C parser; instead, it is meant to simplify the process - of extracting definitions from header files in the absence of a complete build system. 
Many files - will require some amount of manual intervention to parse properly (see 'replace' and extra arguments - to __init__) - - Usage: - ## create parser object, load two files - p = CParser(['header1.h', 'header2.h']) - - ## remove comments, preprocess, and search for declarations - p.processAll() - - ## just to see what was successfully parsed from the files - p.printAll() - - ## access parsed declarations - allValues = p.defs['values'] - functionSignatures = p.defs['functions'] - ... - - ## To see what was not successfully parsed: - unp = p.processAll(returnUnparsed=True) - for s in unp: - print s - """ - - cacheVersion = 22 ## increment every time cache structure or parsing changes to invalidate old cache files. - - def __init__(self, files=None, replace=None, copyFrom=None, processAll=True, cache=None, verbose=False, **args): - """Create a C parser object fiven a file or list of files. Files are read to memory and operated - on from there. - 'copyFrom' may be another CParser object from which definitions should be copied. - 'replace' may be specified to perform string replacements before parsing. - format is {'searchStr': 'replaceStr', ...} - Extra parameters may be used to specify the starting state of the parser. For example, - one could provide a set of missing type declarations by - types={'UINT': ('unsigned int'), 'STRING': ('char', 1)} - Similarly, preprocessor macros can be specified: - macros={'WINAPI': ''} - """ - - - self.defs = {} ## holds all definitions - self.fileDefs = {} ## holds definitions grouped by the file they came from - - self.initOpts = args.copy() - self.initOpts['files'] = [] - self.initOpts['replace'] = {} - - self.dataList = ['types', 'variables', 'fnmacros', 'macros', 'structs', 'unions', 'enums', 'functions', 'values'] - - self.verbose = False - - # placeholders for definitions that change during parsing - #if hasPyParsing: - #self.macroExpr = Forward() - #self.fnMacroExpr = Forward() - #self.definedType = Forward() - #self.definedStruct = Forward() - #self.definedEnum = Forward() - - self.fileOrder = [] - self.files = {} - self.packList = {} ## list describing struct packing rules as defined by #pragma pack - if files is not None: - if type(files) is str: - files = [files] - for f in files: - self.loadFile(f, replace) - - ## initialize empty definition lists - for k in self.dataList: - self.defs[k] = {} - #for f in files: - #self.fileDefs[f][k] = {} - - self.compiledTypes = {} ## holds translations from typedefs/structs/unions to fundamental types - - self.currentFile = None - - # Import extra arguments if specified - for t in args: - for k in args[t].keys(): - self.addDef(t, k, args[t][k]) - - # Import from other CParsers if specified - if copyFrom is not None: - if type(copyFrom) not in [list, tuple]: - copyFrom = [copyFrom] - for p in copyFrom: - self.importDict(p.fileDefs) - - if processAll: - self.processAll(cache=cache, verbose=verbose) - - def processAll(self, cache=None, returnUnparsed=False, printAfterPreprocess=False, noCacheWarning=True, verbose=False): - """Remove comments, preprocess, and parse declarations from all files. (operates in memory; does not alter the original files) - Returns a list of the results from parseDefs. - 'cache' may specify a file where cached results are be stored or retrieved. The cache - is automatically invalidated if any of the arguments to __init__ are changed, or if the - C files are newer than the cache. - 'returnUnparsed' is passed directly to parseDefs. 
- 'printAfterPreprocess' is for debugging; prints the result of preprocessing each file.""" - self.verbose = verbose - if cache is not None and self.loadCache(cache, checkValidity=True): - if verbose: - print "Loaded cached definitions; will skip parsing." - return ## cached values loaded successfully, nothing left to do here - #else: - #print "No cache.", cache - - - results = [] - if noCacheWarning or verbose: - print "Parsing C header files (no valid cache found). This could take several minutes..." - for f in self.fileOrder: - #fn = os.path.basename(f) - if self.files[f] is None: - ## This means the file could not be loaded and there was no cache. - raise Exception('Could not find header file "%s" or a suitable cache file.' % f) - if verbose: - print "Removing comments from file '%s'..." % f - self.removeComments(f) - if verbose: - print "Preprocessing file '%s'..." % f - self.preprocess(f) - if printAfterPreprocess: - print "===== PREPROCSSED %s =======" % f - print self.files[f] - if verbose: - print "Parsing definitions in file '%s'..." % f - results.append(self.parseDefs(f, returnUnparsed)) - - if cache is not None: - if verbose: - print "Writing cache file '%s'" % cache - self.writeCache(cache) - - return results - - - def loadCache(self, cacheFile, checkValidity=False): - """Load a cache file. Used internally if cache is specified in processAll(). - if checkValidity=True, then run several checks before loading the cache: - - cache file must not be older than any source files - - cache file must not be older than this library file - - options recorded in cache must match options used to initialize CParser""" - - ## make sure cache file exists - if type(cacheFile) is not str: - raise Exception("cache file option must be a string.") - if not os.path.isfile(cacheFile): - d = os.path.dirname(__file__) ## If file doesn't exist, search for it in this module's path - cacheFile = os.path.join(d, "headers", cacheFile) - if not os.path.isfile(cacheFile): - if self.verbose: - print "Can't find requested cache file." - return False - - ## make sure cache is newer than all input files - if checkValidity: - mtime = os.stat(cacheFile).st_mtime - for f in self.fileOrder: - ## if file does not exist, then it does not count against the validity of the cache. - if os.path.isfile(f) and os.stat(f).st_mtime > mtime: - if self.verbose: - print "Cache file is out of date." - return False - - try: - ## read cache file - import pickle - cache = pickle.load(open(cacheFile, 'rb')) - - ## make sure __init__ options match - if checkValidity: - if cache['opts'] != self.initOpts: - if self.verbose: - print "Cache file is not valid--created using different initialization options." - print cache['opts'] - print self.initOpts - return False - elif self.verbose: - print "Cache init opts are OK:" - print cache['opts'] - if cache['version'] < self.cacheVersion: - if self.verbose: - print "Cache file is not valid--cache format has changed." - return False - - ## import all parse results - self.importDict(cache['fileDefs']) - return True - except: - print "Warning--cache read failed:" - sys.excepthook(*sys.exc_info()) - return False - - def importDict(self, data): - """Import definitions from a dictionary. The dict format should be the - same as CParser.fileDefs. 
Used internally; does not need to be called - manually.""" - for f in data.keys(): - self.currentFile = f - for k in self.dataList: - for n in data[f][k]: - self.addDef(k, n, data[f][k][n]) - - def writeCache(self, cacheFile): - """Store all parsed declarations to cache. Used internally.""" - cache = {} - cache['opts'] = self.initOpts - cache['fileDefs'] = self.fileDefs - cache['version'] = self.cacheVersion - #for k in self.dataList: - #cache[k] = getattr(self, k) - import pickle - pickle.dump(cache, open(cacheFile, 'wb')) - - def loadFile(self, file, replace=None): - """Read a file, make replacements if requested. Called by __init__, should - not be called manually.""" - if not os.path.isfile(file): - ## Not a fatal error since we might be able to function properly if there is a cache file.. - #raise Exception("File %s not found" % file) - print "Warning: C header '%s' is missing; this may cause trouble." % file - self.files[file] = None - return False - - fd = open(file, 'rU') ## U causes all newline types to be converted to \n - self.files[file] = fd.read() - fd.close() - - if replace is not None: - for s in replace: - self.files[file] = re.sub(s, replace[s], self.files[file]) - self.fileOrder.append(file) - bn = os.path.basename(file) - self.initOpts['replace'][bn] = replace - self.initOpts['files'].append(bn) # only interested in the file names; the directory may change between systems. - return True - - - - - - #### Beginning of processing functions - - def assertPyparsing(self): - """Make sure pyparsing module is available.""" - global hasPyParsing - if not hasPyParsing: - raise Exception("CParser class requires 'pyparsing' library for actual parsing work. Without this library, CParser can only be used with previously cached parse results.") - - - def removeComments(self, file): - """Remove all comments from file. (operates in memory; does not alter the original files)""" - self.assertPyparsing() - text = self.files[file] - cplusplusLineComment = Literal("//") + restOfLine - # match quoted strings first to prevent matching comments inside quotes - self.files[file] = (quotedString | cStyleComment.suppress() | cplusplusLineComment.suppress()).transformString(text) - - - def preprocess(self, file): - """Scan named file for preprocessor directives, removing them while expanding macros. 
(operates in memory; does not alter the original files)""" - self.assertPyparsing() - self.buildParser() ## we need this so that evalExpr works properly - self.currentFile = file - packStack = [(None,None)] ## stack for #pragma pack push/pop - self.packList[file] = [(0,None)] - packing = None ## current packing value - - text = self.files[file] - - ## First join together lines split by \\n - text = Literal('\\\n').suppress().transformString(text) - - #self.ppDirective = Combine("#" + Word(alphas).leaveWhitespace()) + restOfLine - - # define the structure of a macro definition - name = Word(alphas+'_', alphanums+'_')('name') - self.ppDefine = name.setWhitespaceChars(' \t')("macro") + Optional(lparen + delimitedList(name) + rparen).setWhitespaceChars(' \t')('args') + SkipTo(LineEnd())('value') - self.ppDefine.setParseAction(self.processMacroDefn) - - #self.updateMacroDefns() - #self.updateFnMacroDefns() - - # define pattern for scanning through the input string - #self.macroExpander = (self.macroExpr | self.fnMacroExpr) - - ## Comb through lines, process all directives - lines = text.split('\n') - - result = [] - #macroExpander = (quotedString | self.macroExpander) - directive = re.compile(r'\s*#([a-zA-Z]+)(.*)$') - ifTrue = [True] - ifHit = [] - for i in range(len(lines)): - line = lines[i] - newLine = '' - m = directive.match(line) - if m is None: # regular code line - if ifTrue[-1]: # only include if we are inside the correct section of an IF block - #line = macroExpander.transformString(line) # expand all known macros - newLine = self.expandMacros(line) - else: # macro line - d = m.groups()[0] - rest = m.groups()[1] - - #print "PREPROCESS:", d, rest - if d == 'ifdef': - d = 'if' - rest = 'defined '+rest - elif d == 'ifndef': - d = 'if' - rest = '!defined '+rest - - ## Evaluate 'defined' operator before expanding macros - if d in ['if', 'elif']: - def pa(t): - return ['0', '1'][t['name'] in self.defs['macros'] or t['name'] in self.defs['fnmacros']] - rest = ( - Keyword('defined') + - (name | lparen + name + rparen) - ).setParseAction(pa).transformString(rest) - elif d in ['define', 'undef']: - macroName, rest = re.match(r'\s*([a-zA-Z_][a-zA-Z0-9_]*)(.*)$', rest).groups() - - ## Expand macros if needed - if rest is not None and (all(ifTrue) or d in ['if', 'elif']): - rest = self.expandMacros(rest) - - if d == 'elif': - if ifHit[-1] or not all(ifTrue[:-1]): - ev = False - else: - ev = self.evalPreprocessorExpr(rest) - if self.verbose: - print " "*(len(ifTrue)-2) + line, rest, ev - ifTrue[-1] = ev - ifHit[-1] = ifHit[-1] or ev - elif d == 'else': - if self.verbose: - print " "*(len(ifTrue)-2) + line, not ifHit[-1] - ifTrue[-1] = (not ifHit[-1]) and all(ifTrue[:-1]) - ifHit[-1] = True - elif d == 'endif': - ifTrue.pop() - ifHit.pop() - if self.verbose: - print " "*(len(ifTrue)-1) + line - elif d == 'if': - if all(ifTrue): - ev = self.evalPreprocessorExpr(rest) - else: - ev = False - if self.verbose: - print " "*(len(ifTrue)-1) + line, rest, ev - ifTrue.append(ev) - ifHit.append(ev) - elif d == 'define': - if not ifTrue[-1]: - continue - if self.verbose: - print " "*(len(ifTrue)) + "define:", macroName, rest - try: - self.ppDefine.parseString(macroName+ ' ' + rest) ## macro is registered here - except: - print "Error processing macro definition:", macroName, rest - print " ", sys.exc_info()[1] - elif d == 'undef': - if not ifTrue[-1]: - continue - try: - self.remDef('macros', macroName.strip()) - #self.macroListString = '|'.join(self.defs['macros'].keys() + self.defs['fnmacros'].keys()) - 
#self.updateMacroDefns() - except: - if sys.exc_info()[0] is not KeyError: - sys.excepthook(*sys.exc_info()) - print "Error removing macro definition '%s'" % macroName.strip() - elif d == 'pragma': ## Check for changes in structure packing - if not ifTrue[-1]: - continue - m = re.match(r'\s+pack\s*\(([^\)]+)\)', rest) - if m is None: - continue - opts = [s.strip() for s in m.groups()[0].split(',')] - - pushpop = id = val = None - for o in opts: - if o in ['push', 'pop']: - pushpop = o - elif o.isdigit(): - val = int(o) - else: - id = o - - if val is not None: - packing = val - - if pushpop == 'push': - packStack.append((packing, id)) - elif opts[0] == 'pop': - if id is None: - packStack.pop() - else: - ind = None - for i in range(len(packStack)): - if packStack[i][1] == id: - ind = i - break - if ind is not None: - packStack = packStack[:ind] - if val is None: - packing = packStack[-1][0] - else: - packing = int(opts[0]) - - if self.verbose: - print ">> Packing changed to %s at line %d" % (str(packing), i) - self.packList[file].append((i, packing)) - else: - pass ## Ignore any other directives - - result.append(newLine) - self.files[file] = '\n'.join(result) - - def evalPreprocessorExpr(self, expr): - ## make a few alterations so the expression can be eval'd - macroDiffs = ( - Literal('!').setParseAction(lambda: ' not ') | - Literal('&&').setParseAction(lambda: ' and ') | - Literal('||').setParseAction(lambda: ' or ') | - Word(alphas+'_',alphanums+'_').setParseAction(lambda: '0')) - expr2 = macroDiffs.transformString(expr) - - try: - ev = bool(eval(expr2)) - except: - if self.verbose: - print "Error evaluating preprocessor expression: %s [%s]" % (expr, expr2) - print " ", sys.exc_info()[1] - ev = False - return ev - - - - #def updateMacroDefns(self): - ##self.macroExpr << MatchFirst( [Keyword(m)('macro') for m in self.defs['macros']] ) - ##self.macroExpr.setParseAction(self.processMacroRef) - - ## regex is faster than pyparsing. 
- ## Matches quoted strings and macros - - ##names = self.defs['macros'].keys() + self.defs['fnmacros'].keys() - #if len(self.macroListString) == 0: - #self.macroRegex = None - #else: - #self.macroRegex = re.compile( - #r'("(\\"|[^"])*")|(\b(%s)\b)' % self.macroListString - #) - - #def updateFnMacroDefns(self): - #self.fnMacroExpr << MatchFirst( [(Keyword(m)('macro') + lparen + Group(delimitedList(expression))('args') + rparen) for m in self.defs['fnmacros']] ) - #self.fnMacroExpr.setParseAction(self.processFnMacroRef) - - - def processMacroDefn(self, t): - """Parse a #define macro and register the definition""" - if self.verbose: - print "MACRO:", t - #macroVal = self.macroExpander.transformString(t.value).strip() - #macroVal = Literal('\\\n').suppress().transformString(macroVal) ## remove escaped newlines - macroVal = t.value.strip() - if macroVal in self.defs['fnmacros']: - self.addDef('fnmacros', t.macro, self.defs['fnmacros'][macroVal]) - if self.verbose: - print " Copy fn macro %s => %s" % (macroVal, t.macro) - else: - if t.args == '': - val = self.evalExpr(macroVal) - self.addDef('macros', t.macro, macroVal) - self.addDef('values', t.macro, val) - if self.verbose: - print " Add macro:", t.macro, "("+str(val)+")", self.defs['macros'][t.macro] - else: - self.addDef('fnmacros', t.macro, self.compileFnMacro(macroVal, [x for x in t.args])) - if self.verbose: - print " Add fn macro:", t.macro, t.args, self.defs['fnmacros'][t.macro] - - #if self.macroListString == '': - #self.macroListString = t.macro - #else: - #self.macroListString += '|' + t.macro - #self.updateMacroDefns() - #self.macroExpr << MatchFirst( map(Keyword,self.defs['macros'].keys()) ) - return "#define " + t.macro + " " + macroVal - - - def compileFnMacro(self, text, args): - """Turn a function macro spec into a compiled description""" - ## find all instances of each arg in text - argRegex = re.compile(r'("(\\"|[^"])*")|(\b(%s)\b)' % ('|'.join(args))) - start = 0 - parts = [] - argOrder = [] - N = 3 - for m in argRegex.finditer(text): - arg = m.groups()[N] - #print m, arg - if arg is not None: - parts.append(text[start:m.start(N)] + '%s') - start = m.end(N) - argOrder.append(args.index(arg)) - parts.append(text[start:]) - return (''.join(parts), argOrder) - - - def expandMacros(self, line): - reg = re.compile(r'("(\\"|[^"])*")|(\b(\w+)\b)') - parts = [] - start = 0 - N = 3 ## the group number to check for macro names - macros = self.defs['macros'] - fnmacros = self.defs['fnmacros'] - for m in reg.finditer(line): - name = m.groups()[N] - if name in macros: - parts.append(line[start:m.start(N)]) - start = m.end(N) - parts.append(macros[name]) - elif name in fnmacros: - try: ## If function macro expansion fails, just ignore it. - exp, end = self.expandFnMacro(name, line[m.end(N):]) - parts.append(line[start:m.start(N)]) - start = end + m.end(N) - parts.append(exp) - except: - if sys.exc_info()[1][0] != 0: - print "Function macro expansion failed:", name, line[m.end(N):] - raise - parts.append(line[start:]) - return ''.join(parts) - - - - #def expandMacros(self, line): - #if self.macroRegex is None: - #return line - #parts = [] - #start = 0 - #N = 3 ## the group number to check for macro names - #for m in self.macroRegex.finditer(line): - #name = m.groups()[N] - #if name is not None: - #if name in self.defs['macros']: - #parts.append(line[start:m.start(N)]) - #start = m.end(N) - #parts.append(self.defs['macros'][name]) - #elif name in self.defs['fnmacros']: - #try: ## If function macro expansion fails, just ignore it. 
- #exp, end = self.expandFnMacro(name, line[m.end(N):]) - #parts.append(line[start:m.start(N)]) - #start = end + m.end(N) - #parts.append(exp) - #except: - #if sys.exc_info()[1][0] != 0: - #print "Function macro expansion failed:", name, line[m.end(N):] - #raise - - #else: - #raise Exception("Macro '%s' not found (internal error)" % name) - #parts.append(line[start:]) - #return ''.join(parts) - - def expandFnMacro(self, name, text): - #print "expandMacro:", name, text - defn = self.defs['fnmacros'][name] - ## defn looks like ('%s + %s / %s', (0, 0, 1)) - - argList = stringStart + lparen + Group(delimitedList(expression))('args') + rparen - res = [x for x in argList.scanString(text, 1)] - if len(res) == 0: - raise Exception(0, "Function macro '%s' not followed by (...)" % name) - args, start, end = res[0] - #print " ", res - #print " ", args - #print " ", defn - newStr = defn[0] % tuple([args[0][i] for i in defn[1]]) - #print " ", newStr - return (newStr, end) - - - # parse action to replace macro references with their respective definition - #def processMacroRef(self, t): - #return self.defs['macros'][t.macro] - - #def processFnMacroRef(self, t): - #m = self.defs['fnmacros'][t.macro] - ##print "=====>>" - ##print "Process FN MACRO:", t - ##print " macro defn:", t.macro, m - ##print " macro call:", t.args - ### m looks like ('a + b', ('a', 'b')) - #newStr = m[0][:] - ##print " starting str:", newStr - #try: - #for i in range(len(m[1])): - ##print " step", i - #arg = m[1][i] - ##print " arg:", arg, '=>', t.args[i] - - #newStr = Keyword(arg).copy().setParseAction(lambda: t.args[i]).transformString(newStr) - ##print " new str:", newStr - #except: - ##sys.excepthook(*sys.exc_info()) - #raise - ##print "<<=====" - #return newStr - - - - - - - - - def parseDefs(self, file, returnUnparsed=False): - """Scan through the named file for variable, struct, enum, and function declarations. - Returns the entire tree of successfully parsed tokens. - If returnUnparsed is True, return a string of all lines that failed to match (for debugging).""" - self.assertPyparsing() - self.currentFile = file - #self.definedType << kwl(self.defs['types'].keys()) - - parser = self.buildParser() - if returnUnparsed: - text = parser.suppress().transformString(self.files[file]) - return re.sub(r'\n\s*\n', '\n', text) - else: - return [x[0] for x in parser.scanString(self.files[file])] - - def buildParser(self): - """Builds the entire tree of parser elements for the C language (the bits we support, anyway). - """ - - if hasattr(self, 'parser'): - return self.parser - - - self.assertPyparsing() - - - self.structType = Forward() - self.enumType = Forward() - self.typeSpec = (typeQualifier + ( - fundType | - Optional(kwl(sizeModifiers + signModifiers)) + ident | - self.structType | - self.enumType - ) + typeQualifier + msModifier).setParseAction(recombine) - #self.argList = Forward() - - ### Abstract declarators for use in function pointer arguments - # Thus begins the extremely hairy business of parsing C declarators. - # Whomever decided this was a reasonable syntax should probably never breed. - # The following parsers combined with the processDeclarator function - # allow us to turn a nest of type modifiers into a correctly - # ordered list of modifiers. - - self.declarator = Forward() - self.abstractDeclarator = Forward() - - ## abstract declarators look like: - # - # * - # **[num] - # (*)(int, int) - # *( )(int, int)[10] - # ...etc... 
- self.abstractDeclarator << Group( - typeQualifier + Group(ZeroOrMore('*'))('ptrs') + typeQualifier + - ((Optional('&')('ref')) | (lparen + self.abstractDeclarator + rparen)('center')) + - Optional(lparen + Optional(delimitedList(Group( - self.typeSpec('type') + - self.abstractDeclarator('decl') + - Optional(Literal('=').suppress() + expression, default=None)('val') - )), default=None) + rparen)('args') + - Group(ZeroOrMore(lbrack + Optional(expression, default='-1') + rbrack))('arrays') - ) - - ## Argument list may consist of declarators or abstract declarators - #self.argList << delimitedList(Group( - #self.typeSpec('type') + - #(self.declarator('decl') | self.abstractDeclarator('decl')) + - #Optional(Keyword('=')) + expression - #)) - - ## declarators look like: - # varName - # *varName - # **varName[num] - # (*fnName)(int, int) - # * fnName(int arg1=0)[10] - # ...etc... - self.declarator << Group( - typeQualifier + callConv + Group(ZeroOrMore('*'))('ptrs') + typeQualifier + - ((Optional('&')('ref') + ident('name')) | (lparen + self.declarator + rparen)('center')) + - Optional(lparen + Optional(delimitedList(Group( - self.typeSpec('type') + - (self.declarator | self.abstractDeclarator)('decl') + - Optional(Literal('=').suppress() + expression, default=None)('val') - )), default=None) + rparen)('args') + - Group(ZeroOrMore(lbrack + Optional(expression, default='-1') + rbrack))('arrays') - ) - self.declaratorList = Group(delimitedList(self.declarator)) - - ## typedef - self.typeDecl = Keyword('typedef') + self.typeSpec('type') + self.declaratorList('declList') + semi - self.typeDecl.setParseAction(self.processTypedef) - - ## variable declaration - self.variableDecl = Group(self.typeSpec('type') + Optional(self.declaratorList('declList')) + Optional(Literal('=').suppress() + (expression('value') | (lbrace + Group(delimitedList(expression))('arrayValues') + rbrace)))) + semi - - self.variableDecl.setParseAction(self.processVariable) - - ## function definition - #self.paramDecl = Group(self.typeSpec + (self.declarator | self.abstractDeclarator)) + Optional(Literal('=').suppress() + expression('value')) - self.typelessFunctionDecl = self.declarator('decl') + nestedExpr('{', '}').suppress() - self.functionDecl = self.typeSpec('type') + self.declarator('decl') + nestedExpr('{', '}').suppress() - self.functionDecl.setParseAction(self.processFunction) - - - ## Struct definition - self.structDecl = Forward() - structKW = (Keyword('struct') | Keyword('union')) - #self.structType << structKW('structType') + ((Optional(ident)('name') + lbrace + Group(ZeroOrMore( Group(self.structDecl | self.variableDecl.copy().setParseAction(lambda: None)) ))('members') + rbrace) | ident('name')) - self.structMember = ( - Group(self.variableDecl.copy().setParseAction(lambda: None)) | - (self.typeSpec + self.declarator + nestedExpr('{', '}')).suppress() | - (self.declarator + nestedExpr('{', '}')).suppress() - ) - self.declList = lbrace + Group(OneOrMore(self.structMember))('members') + rbrace - self.structType << (Keyword('struct') | Keyword('union'))('structType') + ((Optional(ident)('name') + self.declList) | ident('name')) - - self.structType.setParseAction(self.processStruct) - #self.updateStructDefn() - - self.structDecl = self.structType + semi - - ## enum definition - enumVarDecl = Group(ident('name') + Optional(Literal('=').suppress() + (integer('value') | ident('valueName')))) - - self.enumType << Keyword('enum') + (Optional(ident)('name') + lbrace + Group(delimitedList(enumVarDecl))('members') + rbrace | 
ident('name')) - self.enumType.setParseAction(self.processEnum) - - self.enumDecl = self.enumType + semi - - - #self.parser = (self.typeDecl | self.variableDecl | self.structDecl | self.enumDecl | self.functionDecl) - self.parser = (self.typeDecl | self.variableDecl | self.functionDecl) - return self.parser - - def processDeclarator(self, decl): - """Process a declarator (without base type) and return a tuple (name, [modifiers]) - See processType(...) for more information.""" - toks = [] - name = None - #print "DECL:", decl - if 'callConv' in decl and len(decl['callConv']) > 0: - toks.append(decl['callConv']) - if 'ptrs' in decl and len(decl['ptrs']) > 0: - toks.append('*' * len(decl['ptrs'])) - if 'arrays' in decl and len(decl['arrays']) > 0: - #arrays = [] - #for x in decl['arrays']: - #n = self.evalExpr(x) - #if n == -1: ## If an array was given as '[]', interpret it as '*' instead. - #toks.append('*') - #else: - #arrays.append(n) - #if len(arrays) > 0: - #toks.append(arrays) - toks.append([self.evalExpr(x) for x in decl['arrays']]) - if 'args' in decl and len(decl['args']) > 0: - #print " process args" - if decl['args'][0] is None: - toks.append(()) - else: - toks.append(tuple([self.processType(a['type'], a['decl']) + (a['val'][0],) for a in decl['args']])) - if 'ref' in decl: - toks.append('&') - if 'center' in decl: - (n, t) = self.processDeclarator(decl['center'][0]) - if n is not None: - name = n - toks.extend(t) - if 'name' in decl: - name = decl['name'] - return (name, toks) - - def processType(self, typ, decl): - """Take a declarator + base type and return a serialized name/type description. - The description will be a list of elements (name, [basetype, modifier, modifier, ...]) - - name is the string name of the declarator or None for an abstract declarator - - basetype is the string representing the base type - - modifiers can be: - '*' - pointer (multiple pointers "***" allowed) - '&' - reference - '__X' - calling convention (windows only). X can be 'cdecl' or 'stdcall' - list - array. Value(s) indicate the length of each array, -1 for incomplete type. - tuple - function, items are the output of processType for each function argument. 
- - Examples: - int *x[10] => ('x', ['int', [10], '*']) - char fn(int x) => ('fn', ['char', [('x', ['int'])]]) - struct s (*)(int, int*) => (None, ["struct s", ((None, ['int']), (None, ['int', '*'])), '*']) - """ - #print "PROCESS TYPE/DECL:", typ, decl - (name, decl) = self.processDeclarator(decl) - return (name, [typ] + decl) - - - - def processEnum(self, s, l, t): - try: - if self.verbose: - print "ENUM:", t - if t.name == '': - n = 0 - while True: - name = 'anonEnum%d' % n - if name not in self.defs['enums']: - break - n += 1 - else: - name = t.name[0] - - if self.verbose: - print " name:", name - - if name not in self.defs['enums']: - i = 0 - enum = {} - for v in t.members: - if v.value != '': - i = eval(v.value) - if v.valueName != '': - i = enum[v.valueName] - enum[v.name] = i - self.addDef('values', v.name, i) - i += 1 - if self.verbose: - print " members:", enum - self.addDef('enums', name, enum) - self.addDef('types', 'enum '+name, ('enum', name)) - return ('enum ' + name) - except: - if self.verbose: - print "Error processing enum:", t - sys.excepthook(*sys.exc_info()) - - - def processFunction(self, s, l, t): - if self.verbose: - print "FUNCTION", t, t.keys() - - try: - (name, decl) = self.processType(t.type, t.decl[0]) - if len(decl) == 0 or type(decl[-1]) != tuple: - print t - raise Exception("Incorrect declarator type for function definition.") - if self.verbose: - print " name:", name - print " sig:", decl - self.addDef('functions', name, (decl[:-1], decl[-1])) - - except: - if self.verbose: - print "Error processing function:", t - sys.excepthook(*sys.exc_info()) - - - def packingAt(self, line): - """Return the structure packing value at the given line number""" - packing = None - for p in self.packList[self.currentFile]: - if p[0] <= line: - packing = p[1] - else: - break - return packing - - def processStruct(self, s, l, t): - try: - strTyp = t.structType # struct or union - - ## check for extra packing rules - packing = self.packingAt(lineno(l, s)) - - if self.verbose: - print strTyp.upper(), t.name, t - if t.name == '': - n = 0 - while True: - sname = 'anon_%s%d' % (strTyp, n) - if sname not in self.defs[strTyp+'s']: - break - n += 1 - else: - if type(t.name) is str: - sname = t.name - else: - sname = t.name[0] - if self.verbose: - print " NAME:", sname - if len(t.members) > 0 or sname not in self.defs[strTyp+'s'] or self.defs[strTyp+'s'][sname] == {}: - if self.verbose: - print " NEW " + strTyp.upper() - struct = [] - for m in t.members: - typ = m[0].type - val = self.evalExpr(m) - if self.verbose: - print " member:", m, m[0].keys(), m[0].declList - if len(m[0].declList) == 0: ## anonymous member - struct.append((None, [typ], None)) - for d in m[0].declList: - (name, decl) = self.processType(typ, d) - struct.append((name, decl, val)) - if self.verbose: - print " ", name, decl, val - self.addDef(strTyp+'s', sname, {'pack': packing, 'members': struct}) - self.addDef('types', strTyp+' '+sname, (strTyp, sname)) - #self.updateStructDefn() - return strTyp+' '+sname - except: - #print t - sys.excepthook(*sys.exc_info()) - - def processVariable(self, s, l, t): - if self.verbose: - print "VARIABLE:", t - try: - val = self.evalExpr(t[0]) - for d in t[0].declList: - (name, typ) = self.processType(t[0].type, d) - if type(typ[-1]) is tuple: ## this is a function prototype - if self.verbose: - print " Add function prototype:", name, typ, val - self.addDef('functions', name, (typ[:-1], typ[-1])) - else: - if self.verbose: - print " Add variable:", name, typ, val - 
self.addDef('variables', name, (val, typ)) - self.addDef('values', name, val) - except: - #print t, t[0].name, t.value - sys.excepthook(*sys.exc_info()) - - def processTypedef(self, s, l, t): - if self.verbose: - print "TYPE:", t - typ = t.type - #print t, t.type - for d in t.declList: - (name, decl) = self.processType(typ, d) - if self.verbose: - print " ", name, decl - self.addDef('types', name, decl) - #self.definedType << MatchFirst( map(Keyword,self.defs['types'].keys()) ) - - def evalExpr(self, toks): - ## Evaluates expressions. Currently only works for expressions that also - ## happen to be valid python expressions. - ## This function does not currently include previous variable - ## declarations, but that should not be too difficult to implement.. - #print "Eval:", toks - try: - if isinstance(toks, basestring): - #print " as string" - val = self.eval(toks, None, self.defs['values']) - elif toks.arrayValues != '': - #print " as list:", toks.arrayValues - val = [self.eval(x, None, self.defs['values']) for x in toks.arrayValues] - elif toks.value != '': - #print " as value" - val = self.eval(toks.value, None, self.defs['values']) - else: - #print " as None" - val = None - return val - except: - if self.verbose: - print " failed eval:", toks - print " ", sys.exc_info()[1] - return None - - def eval(self, expr, *args): - """Just eval with a little extra robustness.""" - expr = expr.strip() - cast = (lparen + self.typeSpec + self.abstractDeclarator + rparen).suppress() - expr = (quotedString | number | cast).transformString(expr) - if expr == '': - return None - return eval(expr, *args) - - def printAll(self, file=None): - """Print everything parsed from files. Useful for debugging.""" - from pprint import pprint - for k in self.dataList: - print "============== %s ==================" % k - if file is None: - pprint(self.defs[k]) - else: - pprint(self.fileDefs[file][k]) - - def addDef(self, typ, name, val): - """Add a definition of a specific type to both the definition set for the current file and the global definition set.""" - self.defs[typ][name] = val - if self.currentFile is None: - baseName = None - else: - baseName = os.path.basename(self.currentFile) - if baseName not in self.fileDefs: - self.fileDefs[baseName] = {} - for k in self.dataList: - self.fileDefs[baseName][k] = {} - self.fileDefs[baseName][typ][name] = val - - def remDef(self, typ, name): - if self.currentFile is None: - baseName = None - else: - baseName = os.path.basename(self.currentFile) - del self.defs[typ][name] - del self.fileDefs[baseName][typ][name] - - - def isFundType(self, typ): - """Return True if this type is a fundamental C type, struct, or union""" - if typ[0][:7] == 'struct ' or typ[0][:6] == 'union ' or typ[0][:5] == 'enum ': - return True - - names = baseTypes + sizeModifiers + signModifiers - for w in typ[0].split(): - if w not in names: - return False - return True - - def evalType(self, typ): - """evaluate a named type into its fundamental type""" - used = [] - while True: - if self.isFundType(typ): - ## remove 'signed' before returning evaluated type - typ[0] = re.sub(r'\bsigned\b', '', typ[0]).strip() - - - return typ - parent = typ[0] - if parent in used: - raise Exception('Recursive loop while evaluating types. 
(typedefs are %s)' % (' -> '.join(used+[parent]))) - used.append(parent) - if not parent in self.defs['types']: - raise Exception('Unknown type "%s" (typedefs are %s)' % (parent, ' -> '.join(used))) - pt = self.defs['types'][parent] - typ = pt + typ[1:] - - def find(self, name): - """Search all definitions for the given name""" - res = [] - for f in self.fileDefs: - fd = self.fileDefs[f] - for t in fd: - typ = fd[t] - for k in typ: - if isinstance(name, basestring): - if k == name: - res.append((f, t)) - else: - if re.match(name, k): - res.append((f, t, k)) - return res - - - - def findText(self, text): - """Search all file strings for text, return matching lines.""" - res = [] - for f in self.files: - l = self.files[f].split('\n') - for i in range(len(l)): - if text in l[i]: - res.append((f, i, l[i])) - return res - - -hasPyParsing = False -try: - from pyparsing import * - ParserElement.enablePackrat() - hasPyParsing = True -except: - pass ## no need to do anything yet as we might not be using any parsing functions.. - - -## Define some common language elements if pyparsing is available. -if hasPyParsing: - ## Some basic definitions - expression = Forward() - pexpr = '(' + expression + ')' - numTypes = ['int', 'float', 'double', '__int64'] - baseTypes = ['char', 'bool', 'void'] + numTypes - sizeModifiers = ['short', 'long'] - signModifiers = ['signed', 'unsigned'] - qualifiers = ['const', 'static', 'volatile', 'inline', 'restrict', 'near', 'far'] - msModifiers = ['__based', '__declspec', '__fastcall', '__restrict', '__sptr', '__uptr', '__w64', '__unaligned', '__nullterminated'] - keywords = ['struct', 'enum', 'union', '__stdcall', '__cdecl'] + qualifiers + baseTypes + sizeModifiers + signModifiers - - def kwl(strs): - """Generate a match-first list of keywords given a list of strings.""" - #return MatchFirst(map(Keyword,strs)) - return Regex(r'\b(%s)\b' % '|'.join(strs)) - - keyword = kwl(keywords) - wordchars = alphanums+'_$' - ident = (WordStart(wordchars) + ~keyword + Word(alphas+"_",alphanums+"_$") + WordEnd(wordchars)).setParseAction(lambda t: t[0]) - #integer = Combine(Optional("-") + (Word( nums ) | Combine("0x" + Word(hexnums)))) - semi = Literal(";").ignore(quotedString).suppress() - lbrace = Literal("{").ignore(quotedString).suppress() - rbrace = Literal("}").ignore(quotedString).suppress() - lbrack = Literal("[").ignore(quotedString).suppress() - rbrack = Literal("]").ignore(quotedString).suppress() - lparen = Literal("(").ignore(quotedString).suppress() - rparen = Literal(")").ignore(quotedString).suppress() - hexint = Regex('-?0x[%s]+[UL]*'%hexnums).setParseAction(lambda t: t[0].rstrip('UL')) - decint = Regex(r'-?\d+[UL]*').setParseAction(lambda t: t[0].rstrip('UL')) - integer = (hexint | decint) - floating = Regex(r'-?((\d+(\.\d*)?)|(\.\d+))([eE]-?\d+)?') - number = (hexint | floating | decint) - bitfieldspec = ":" + integer - biOperator = oneOf("+ - / * | & || && ! ~ ^ % == != > < >= <= -> . :: << >> = ? :") - uniRightOperator = oneOf("++ --") - uniLeftOperator = oneOf("++ -- - + * sizeof new") - name = (WordStart(wordchars) + Word(alphas+"_",alphanums+"_$") + WordEnd(wordchars)) - #number = Word(hexnums + ".-+xUL").setParseAction(lambda t: t[0].rstrip('UL')) - #stars = Optional(Word('*&'), default='')('ptrs') ## may need to separate & from * later? - callConv = Optional(Keyword('__cdecl')|Keyword('__stdcall'))('callConv') - - ## Removes '__name' from all type specs.. may cause trouble. 
- underscore2Ident = (WordStart(wordchars) + ~keyword + '__' + Word(alphanums,alphanums+"_$") + WordEnd(wordchars)).setParseAction(lambda t: t[0]) - typeQualifier = ZeroOrMore((underscore2Ident + Optional(nestedExpr())) | kwl(qualifiers)).suppress() - - msModifier = ZeroOrMore(kwl(msModifiers) + Optional(nestedExpr())).suppress() - pointerOperator = ( - '*' + typeQualifier | - '&' + typeQualifier | - '::' + ident + typeQualifier - ) - - - ## language elements - fundType = OneOrMore(kwl(signModifiers + sizeModifiers + baseTypes)).setParseAction(lambda t: ' '.join(t)) - - - - ## Is there a better way to process expressions with cast operators?? - castAtom = ( - ZeroOrMore(uniLeftOperator) + Optional('('+ident+')').suppress() + - (( - ident + '(' + Optional(delimitedList(expression)) + ')' | - ident + OneOrMore('[' + expression + ']') | - ident | number | quotedString - ) | - ('(' + expression + ')')) + - ZeroOrMore(uniRightOperator) - ) - uncastAtom = ( - ZeroOrMore(uniLeftOperator) + - (( - ident + '(' + Optional(delimitedList(expression)) + ')' | - ident + OneOrMore('[' + expression + ']') | - ident | number | quotedString - ) | - ('(' + expression + ')')) + - ZeroOrMore(uniRightOperator) - ) - atom = castAtom | uncastAtom - - expression << Group( - atom + ZeroOrMore(biOperator + atom) - ) - arrayOp = lbrack + expression + rbrack - - def recombine(tok): - """Flattens a tree of tokens and joins into one big string.""" - return " ".join(flatten(tok.asList())) - expression.setParseAction(recombine) - - def flatten(lst): - res = [] - for i in lst: - if type(i) in [list, tuple]: - res.extend(flatten(i)) - else: - res.append(str(i)) - return res - - def printParseResults(pr, depth=0, name=''): - """For debugging; pretty-prints parse result objects.""" - start = name + " "*(20-len(name)) + ':'+ '..'*depth - if isinstance(pr, ParseResults): - print start - for i in pr: - name = '' - for k in pr.keys(): - if pr[k] is i: - name = k - break - printParseResults(i, depth+1, name) - else: - print start + str(pr) - - - -## Just for fun.. -if __name__ == '__main__': - files = sys.argv[1:] - p = CParser(files) - p.processAll() - p.printAll() - \ No newline at end of file diff --git a/scripts/module/pyclibrary/README.md b/scripts/module/pyclibrary/README.md deleted file mode 100644 index f1af9afb7..000000000 --- a/scripts/module/pyclibrary/README.md +++ /dev/null @@ -1,8 +0,0 @@ -pyclibrary -========== - -C parser and ctypes automation for Python. - -Fork of . (`bzr branch lp:pyclibrary pyclibrary-bzr && mkdir pyclibrary && cd pyclibrary && bar fast-export --plain ../pyclibrary-bzr | git fast-import`) - -Pyclibrary includes 1) a pure-python C parser and 2) a ctypes automation library that uses C header file definitions to simplify the use of ctypes. The C parser currently processes all macros, typedefs, structs, unions, enums, function prototypes, and global variable declarations, and can evaluate typedefs down to their fundamental C types + pointers/arrays/function signatures. Pyclibrary can automatically build ctypes structs/unions and perform type conversions when calling functions via cdll/windll. 
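For reference, the removed pyclibrary front end was driven roughly as sketched below. This is a minimal sketch reconstructed from the CParser docstring earlier in this patch, not part of the deleted sources; the header name 'some_header.h' and the import path (assuming scripts/module is on the Python path, so the package __init__ re-exports CParser) are illustrative assumptions.

    # Minimal usage sketch of the removed pyclibrary CParser
    # (reconstructed from its docstring; 'some_header.h' is a
    # hypothetical input, not one of the srsLTE headers).
    from pyclibrary import CParser

    p = CParser(['some_header.h'])    # load the header into memory
    p.processAll()                    # strip comments, preprocess, parse declarations
    values = p.defs['values']         # parsed #define / enum values
    functions = p.defs['functions']   # parsed function signatures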
diff --git a/scripts/module/pyclibrary/__init__.py b/scripts/module/pyclibrary/__init__.py deleted file mode 100644 index 618aaa1a8..000000000 --- a/scripts/module/pyclibrary/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -from CParser import * -from CLibrary import * \ No newline at end of file diff --git a/scripts/module/pyclibrary/license.txt b/scripts/module/pyclibrary/license.txt deleted file mode 100644 index 3d04b87ea..000000000 --- a/scripts/module/pyclibrary/license.txt +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (c) 2010 Luke Campagnola ('luke.campagnola@%s.com' % 'gmail') - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/scripts/module/pyclibrary/pyparsing.py b/scripts/module/pyclibrary/pyparsing.py deleted file mode 100644 index dec506ed0..000000000 --- a/scripts/module/pyclibrary/pyparsing.py +++ /dev/null @@ -1,3754 +0,0 @@ -# module pyparsing.py -# -# Copyright (c) 2003-2011 Paul T. McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# -#from __future__ import generators - -__doc__ = \ -""" -pyparsing module - Classes and methods to define and execute parsing grammars - -The pyparsing module is an alternative approach to creating and executing simple grammars, -vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you -don't need to learn a new syntax for defining grammars or matching expressions - the parsing module -provides a library of classes that you use to construct the grammar directly in Python. 
- -Here is a program to parse "Hello, World!" (or any greeting of the form C{", !"}):: - - from pyparsing import Word, alphas - - # define grammar of a greeting - greet = Word( alphas ) + "," + Word( alphas ) + "!" - - hello = "Hello, World!" - print hello, "->", greet.parseString( hello ) - -The program outputs the following:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - -The Python representation of the grammar is quite readable, owing to the self-explanatory -class names, and the use of '+', '|' and '^' operators. - -The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an -object with named attributes. - -The pyparsing module handles some of the problems that are typically vexing when writing text parsers: - - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) - - quoted strings - - embedded comments -""" - -__version__ = "1.5.6" -__versionTime__ = "1 May 2011 23:41" -__author__ = "Paul McGuire " - -import string -from weakref import ref as wkref -import copy -import sys -import warnings -import re -import sre_constants -#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) - -__all__ = [ -'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', -'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', -'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', -'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', -'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', -'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase', -'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', -'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', -'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', -'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums', -'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno', -'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', -'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', -'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', -'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', -'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', -'indentedBlock', 'originalTextFor', -] - -""" -Detect if we are running version 3.X and make appropriate changes -Robert A. Clark -""" -_PY3K = sys.version_info[0] > 2 -if _PY3K: - _MAX_INT = sys.maxsize - basestring = str - unichr = chr - _ustr = str - alphas = string.ascii_lowercase + string.ascii_uppercase -else: - _MAX_INT = sys.maxint - range = xrange - set = lambda s : dict( [(c,0) for c in s] ) - alphas = string.lowercase + string.uppercase - - def _ustr(obj): - """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries - str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It - then < returns the unicode object | encodes it with the default encoding | ... >. 
- """ - if isinstance(obj,unicode): - return obj - - try: - # If this works, then _ustr(obj) has the same behaviour as str(obj), so - # it won't break any existing code. - return str(obj) - - except UnicodeEncodeError: - # The Python docs (http://docs.python.org/ref/customization.html#l2h-182) - # state that "The return value must be a string object". However, does a - # unicode object (being a subclass of basestring) count as a "string - # object"? - # If so, then return a unicode object: - return unicode(obj) - # Else encode it... but how? There are many choices... :) - # Replace unprintables with escape codes? - #return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors') - # Replace unprintables with question marks? - #return unicode(obj).encode(sys.getdefaultencoding(), 'replace') - # ... - - alphas = string.lowercase + string.uppercase - -# build list of single arg builtins, tolerant of Python version, that can be used as parse actions -singleArgBuiltins = [] -import __builtin__ -for fname in "sum len enumerate sorted reversed list tuple set any all".split(): - try: - singleArgBuiltins.append(getattr(__builtin__,fname)) - except AttributeError: - continue - -def _xml_escape(data): - """Escape &, <, >, ", ', etc. in a string of data.""" - - # ampersand must be replaced first - from_symbols = '&><"\'' - to_symbols = ['&'+s+';' for s in "amp gt lt quot apos".split()] - for from_,to_ in zip(from_symbols, to_symbols): - data = data.replace(from_, to_) - return data - -class _Constants(object): - pass - -nums = string.digits -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -_bslash = chr(92) -printables = "".join( [ c for c in string.printable if c not in string.whitespace ] ) - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( self, pstr, loc=0, msg=None, elem=None ): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parserElement = elem - - def __getattr__( self, aname ): - """supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - """ - if( aname == "lineno" ): - return lineno( self.loc, self.pstr ) - elif( aname in ("col", "column") ): - return col( self.loc, self.pstr ) - elif( aname == "line" ): - return line( self.loc, self.pstr ) - else: - raise AttributeError(aname) - - def __str__( self ): - return "%s (at char %d), (line:%d, col:%d)" % \ - ( self.msg, self.loc, self.lineno, self.column ) - def __repr__( self ): - return _ustr(self) - def markInputline( self, markerString = ">!<" ): - """Extracts the exception line from the input string, and marks - the location of the exception with a special symbol. 
- """ - line_str = self.line - line_column = self.column - 1 - if markerString: - line_str = "".join( [line_str[:line_column], - markerString, line_str[line_column:]]) - return line_str.strip() - def __dir__(self): - return "loc msg pstr parserElement lineno col line " \ - "markInputLine __str__ __repr__".split() - -class ParseException(ParseBaseException): - """exception thrown when parse expressions don't match class; - supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - """ - pass - -class ParseFatalException(ParseBaseException): - """user-throwable exception thrown when inconsistent parse content - is found; stops all parsing immediately""" - pass - -class ParseSyntaxException(ParseFatalException): - """just like C{ParseFatalException}, but thrown internally when an - C{ErrorStop} ('-' operator) indicates that parsing is to stop immediately because - an unbacktrackable syntax error has been found""" - def __init__(self, pe): - super(ParseSyntaxException, self).__init__( - pe.pstr, pe.loc, pe.msg, pe.parserElement) - -#~ class ReparseException(ParseBaseException): - #~ """Experimental class - parse actions can raise this exception to cause - #~ pyparsing to reparse the input string: - #~ - with a modified input string, and/or - #~ - with a modified start location - #~ Set the values of the ReparseException in the constructor, and raise the - #~ exception in a parse action to cause pyparsing to use the new string/location. - #~ Setting the values as None causes no change to be made. - #~ """ - #~ def __init_( self, newstring, restartLoc ): - #~ self.newParseText = newstring - #~ self.reparseLoc = restartLoc - -class RecursiveGrammarException(Exception): - """exception thrown by C{validate()} if the grammar could be improperly recursive""" - def __init__( self, parseElementList ): - self.parseElementTrace = parseElementList - - def __str__( self ): - return "RecursiveGrammarException: %s" % self.parseElementTrace - -class _ParseResultsWithOffset(object): - def __init__(self,p1,p2): - self.tup = (p1,p2) - def __getitem__(self,i): - return self.tup[i] - def __repr__(self): - return repr(self.tup) - def setOffset(self,i): - self.tup = (self.tup[0],i) - -class ParseResults(object): - """Structured parse results, to provide multiple means of access to the parsed data: - - as a list (C{len(results)}) - - by list index (C{results[0], results[1]}, etc.) 
- - by attribute (C{results.}) - """ - #~ __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" ) - def __new__(cls, toklist, name=None, asList=True, modal=True ): - if isinstance(toklist, cls): - return toklist - retobj = object.__new__(cls) - retobj.__doinit = True - return retobj - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( self, toklist, name=None, asList=True, modal=True, isinstance=isinstance ): - if self.__doinit: - self.__doinit = False - self.__name = None - self.__parent = None - self.__accumNames = {} - if isinstance(toklist, list): - self.__toklist = toklist[:] - else: - self.__toklist = [toklist] - self.__tokdict = dict() - - if name is not None and name: - if not modal: - self.__accumNames[name] = 0 - if isinstance(name,int): - name = _ustr(name) # will always return a str, but use _ustr for consistency - self.__name = name - if not toklist in (None,'',[]): - if isinstance(toklist,basestring): - toklist = [ toklist ] - if asList: - if isinstance(toklist,ParseResults): - self[name] = _ParseResultsWithOffset(toklist.copy(),0) - else: - self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0) - self[name].__name = name - else: - try: - self[name] = toklist[0] - except (KeyError,TypeError,IndexError): - self[name] = toklist - - def __getitem__( self, i ): - if isinstance( i, (int,slice) ): - return self.__toklist[i] - else: - if i not in self.__accumNames: - return self.__tokdict[i][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[i] ]) - - def __setitem__( self, k, v, isinstance=isinstance ): - if isinstance(v,_ParseResultsWithOffset): - self.__tokdict[k] = self.__tokdict.get(k,list()) + [v] - sub = v[0] - elif isinstance(k,int): - self.__toklist[k] = v - sub = v - else: - self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)] - sub = v - if isinstance(sub,ParseResults): - sub.__parent = wkref(self) - - def __delitem__( self, i ): - if isinstance(i,(int,slice)): - mylen = len( self.__toklist ) - del self.__toklist[i] - - # convert int to slice - if isinstance(i, int): - if i < 0: - i += mylen - i = slice(i, i+1) - # get removed indices - removed = list(range(*i.indices(mylen))) - removed.reverse() - # fixup indices in token dictionary - for name in self.__tokdict: - occurrences = self.__tokdict[name] - for j in removed: - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) - else: - del self.__tokdict[i] - - def __contains__( self, k ): - return k in self.__tokdict - - def __len__( self ): return len( self.__toklist ) - def __bool__(self): return len( self.__toklist ) > 0 - __nonzero__ = __bool__ - def __iter__( self ): return iter( self.__toklist ) - def __reversed__( self ): return iter( self.__toklist[::-1] ) - def keys( self ): - """Returns all named result keys.""" - return self.__tokdict.keys() - - def pop( self, index=-1 ): - """Removes and returns item at specified index (default=last). 
- Will work with either numeric indices or dict-key indicies.""" - ret = self[index] - del self[index] - return ret - - def get(self, key, defaultValue=None): - """Returns named result matching the given key, or if there is no - such name, then returns the given C{defaultValue} or C{None} if no - C{defaultValue} is specified.""" - if key in self: - return self[key] - else: - return defaultValue - - def insert( self, index, insStr ): - """Inserts new element at location index in the list of parsed tokens.""" - self.__toklist.insert(index, insStr) - # fixup indices in token dictionary - for name in self.__tokdict: - occurrences = self.__tokdict[name] - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) - - def items( self ): - """Returns all named result keys and values as a list of tuples.""" - return [(k,self[k]) for k in self.__tokdict] - - def values( self ): - """Returns all named result values.""" - return [ v[-1][0] for v in self.__tokdict.values() ] - - def __getattr__( self, name ): - if True: #name not in self.__slots__: - if name in self.__tokdict: - if name not in self.__accumNames: - return self.__tokdict[name][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[name] ]) - else: - return "" - return None - - def __add__( self, other ): - ret = self.copy() - ret += other - return ret - - def __iadd__( self, other ): - if other.__tokdict: - offset = len(self.__toklist) - addoffset = ( lambda a: (a<0 and offset) or (a+offset) ) - otheritems = other.__tokdict.items() - otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) ) - for (k,vlist) in otheritems for v in vlist] - for k,v in otherdictitems: - self[k] = v - if isinstance(v[0],ParseResults): - v[0].__parent = wkref(self) - - self.__toklist += other.__toklist - self.__accumNames.update( other.__accumNames ) - return self - - def __radd__(self, other): - if isinstance(other,int) and other == 0: - return self.copy() - - def __repr__( self ): - return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) - - def __str__( self ): - out = "[" - sep = "" - for i in self.__toklist: - if isinstance(i, ParseResults): - out += sep + _ustr(i) - else: - out += sep + repr(i) - sep = ", " - out += "]" - return out - - def _asStringList( self, sep='' ): - out = [] - for item in self.__toklist: - if out and sep: - out.append(sep) - if isinstance( item, ParseResults ): - out += item._asStringList() - else: - out.append( _ustr(item) ) - return out - - def asList( self ): - """Returns the parse results as a nested list of matching tokens, all converted to strings.""" - out = [] - for res in self.__toklist: - if isinstance(res,ParseResults): - out.append( res.asList() ) - else: - out.append( res ) - return out - - def asDict( self ): - """Returns the named parse results as dictionary.""" - return dict( self.items() ) - - def copy( self ): - """Returns a new copy of a C{ParseResults} object.""" - ret = ParseResults( self.__toklist ) - ret.__tokdict = self.__tokdict.copy() - ret.__parent = self.__parent - ret.__accumNames.update( self.__accumNames ) - ret.__name = self.__name - return ret - - def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): - """Returns the parse results as XML. 
Tags are created for tokens and lists that have defined results names.""" - nl = "\n" - out = [] - namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items() - for v in vlist ] ) - nextLevelIndent = indent + " " - - # collapse out indents if formatting is not desired - if not formatted: - indent = "" - nextLevelIndent = "" - nl = "" - - selfTag = None - if doctag is not None: - selfTag = doctag - else: - if self.__name: - selfTag = self.__name - - if not selfTag: - if namedItemsOnly: - return "" - else: - selfTag = "ITEM" - - out += [ nl, indent, "<", selfTag, ">" ] - - worklist = self.__toklist - for i,res in enumerate(worklist): - if isinstance(res,ParseResults): - if i in namedItems: - out += [ res.asXML(namedItems[i], - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - out += [ res.asXML(None, - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - # individual token, see if there is a name for it - resTag = None - if i in namedItems: - resTag = namedItems[i] - if not resTag: - if namedItemsOnly: - continue - else: - resTag = "ITEM" - xmlBodyText = _xml_escape(_ustr(res)) - out += [ nl, nextLevelIndent, "<", resTag, ">", - xmlBodyText, - "" ] - - out += [ nl, indent, "" ] - return "".join(out) - - def __lookup(self,sub): - for k,vlist in self.__tokdict.items(): - for v,loc in vlist: - if sub is v: - return k - return None - - def getName(self): - """Returns the results name for this token expression.""" - if self.__name: - return self.__name - elif self.__parent: - par = self.__parent() - if par: - return par.__lookup(self) - else: - return None - elif (len(self) == 1 and - len(self.__tokdict) == 1 and - self.__tokdict.values()[0][0][1] in (0,-1)): - return self.__tokdict.keys()[0] - else: - return None - - def dump(self,indent='',depth=0): - """Diagnostic method for listing out the contents of a C{ParseResults}. - Accepts an optional C{indent} argument so that this string can be embedded - in a nested display of other data.""" - out = [] - out.append( indent+_ustr(self.asList()) ) - keys = self.items() - keys.sort() - for k,v in keys: - if out: - out.append('\n') - out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) - if isinstance(v,ParseResults): - if v.keys(): - out.append( v.dump(indent,depth+1) ) - else: - out.append(_ustr(v)) - else: - out.append(_ustr(v)) - return "".join(out) - - # add support for pickle protocol - def __getstate__(self): - return ( self.__toklist, - ( self.__tokdict.copy(), - self.__parent is not None and self.__parent() or None, - self.__accumNames, - self.__name ) ) - - def __setstate__(self,state): - self.__toklist = state[0] - self.__tokdict, \ - par, \ - inAccumNames, \ - self.__name = state[1] - self.__accumNames = {} - self.__accumNames.update(inAccumNames) - if par is not None: - self.__parent = wkref(par) - else: - self.__parent = None - - def __dir__(self): - return dir(super(ParseResults,self)) + self.keys() - -def col (loc,strg): - """Returns current column within a string, counting newlines as line separators. - The first column is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{ParserElement.parseString}} for more information - on parsing strings containing s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. 
- """ - return (loc} for more information - on parsing strings containing s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - """ - return strg.count("\n",0,loc) + 1 - -def line( loc, strg ): - """Returns the line of text containing loc within a string, counting newlines as line separators. - """ - lastCR = strg.rfind("\n", 0, loc) - nextCR = strg.find("\n", loc) - if nextCR >= 0: - return strg[lastCR+1:nextCR] - else: - return strg[lastCR+1:] - -def _defaultStartDebugAction( instring, loc, expr ): - print ("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) - -def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ): - print ("Matched " + _ustr(expr) + " -> " + str(toks.asList())) - -def _defaultExceptionDebugAction( instring, loc, expr, exc ): - print ("Exception raised:" + _ustr(exc)) - -def nullDebugAction(*args): - """'Do-nothing' debug action, to suppress debugging output during parsing.""" - pass - -'decorator to trim function calls to match the arity of the target' -if not _PY3K: - def _trim_arity(func, maxargs=2): - limit = [0] - def wrapper(*args): - while 1: - try: - return func(*args[limit[0]:]) - except TypeError: - if limit[0] <= maxargs: - limit[0] += 1 - continue - raise - return wrapper -else: - def _trim_arity(func, maxargs=2): - limit = maxargs - def wrapper(*args): - #~ nonlocal limit - while 1: - try: - return func(*args[limit:]) - except TypeError: - if limit: - limit -= 1 - continue - raise - return wrapper - -class ParserElement(object): - """Abstract base level parser element class.""" - DEFAULT_WHITE_CHARS = " \n\t\r" - verbose_stacktrace = False - - def setDefaultWhitespaceChars( chars ): - """Overrides the default whitespace chars - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars) - - def __init__( self, savelist=False ): - self.parseAction = list() - self.failAction = None - #~ self.name = "" # don't define self.name, let subclasses try/except upcall - self.strRepr = None - self.resultsName = None - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - self.copyDefaultWhiteChars = True - self.mayReturnEmpty = False # used when checking for left-recursion - self.keepTabs = False - self.ignoreExprs = list() - self.debug = False - self.streamlined = False - self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index - self.errmsg = "" - self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) - self.debugActions = ( None, None, None ) #custom debug actions - self.re = None - self.callPreparse = True # used to avoid redundant calls to preParse - self.callDuringTry = False - - def copy( self ): - """Make a copy of this C{ParserElement}. 
Useful for defining different parse actions - for the same parsing pattern, using copies of the original parse element.""" - cpy = copy.copy( self ) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - return cpy - - def setName( self, name ): - """Define name for this expression, for use in debugging.""" - self.name = name - self.errmsg = "Expected " + self.name - if hasattr(self,"exception"): - self.exception.msg = self.errmsg - return self - - def setResultsName( self, name, listAllMatches=False ): - """Define name for referencing matching tokens as a nested attribute - of the returned parse results. - NOTE: this returns a *copy* of the original C{ParserElement} object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - - You can also set results names using the abbreviated syntax, - C{expr("name")} in place of C{expr.setResultsName("name")} - - see L{I{__call__}<__call__>}. - """ - newself = self.copy() - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def setBreak(self,breakFlag = True): - """Method to invoke the Python pdb debugger when this element is - about to be parsed. Set C{breakFlag} to True to enable, False to - disable. - """ - if breakFlag: - _parseMethod = self._parse - def breaker(instring, loc, doActions=True, callPreParse=True): - import pdb - pdb.set_trace() - return _parseMethod( instring, loc, doActions, callPreParse ) - breaker._originalParseMethod = _parseMethod - self._parse = breaker - else: - if hasattr(self._parse,"_originalParseMethod"): - self._parse = self._parse._originalParseMethod - return self - - def setParseAction( self, *fns, **kwargs ): - """Define action to perform when successfully matching parse element definition. - Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, - C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a ParseResults object - If the functions in fns modify the tokens, they can return them as the return - value from fn, and the modified list of tokens will replace the original. - Otherwise, fn does not need to return any value. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{parseString}} for more information - on parsing strings containing s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - """ - self.parseAction = list(map(_trim_arity, list(fns))) - self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"]) - return self - - def addParseAction( self, *fns, **kwargs ): - """Add parse action to expression's list of parse actions. See L{I{setParseAction}}.""" - self.parseAction += list(map(_trim_arity, list(fns))) - self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"]) - return self - - def setFailAction( self, fn ): - """Define action to perform if parsing fails at this expression. 
- Fail acton fn is a callable function that takes the arguments - C{fn(s,loc,expr,err)} where: - - s = string being parsed - - loc = location where expression match was attempted and failed - - expr = the parse expression that failed - - err = the exception thrown - The function returns no value. It may throw C{ParseFatalException} - if it is desired to stop parsing immediately.""" - self.failAction = fn - return self - - def _skipIgnorables( self, instring, loc ): - exprsFound = True - while exprsFound: - exprsFound = False - for e in self.ignoreExprs: - try: - while 1: - loc,dummy = e._parse( instring, loc ) - exprsFound = True - except ParseException: - pass - return loc - - def preParse( self, instring, loc ): - if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) - - if self.skipWhitespace: - wt = self.whiteChars - instrlen = len(instring) - while loc < instrlen and instring[loc] in wt: - loc += 1 - - return loc - - def parseImpl( self, instring, loc, doActions=True ): - return loc, [] - - def postParse( self, instring, loc, tokenlist ): - return tokenlist - - #~ @profile - def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): - debugging = ( self.debug ) #and doActions ) - - if debugging or self.failAction: - #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) - if (self.debugActions[0] ): - self.debugActions[0]( instring, loc, self ) - if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = preloc - try: - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) - except ParseBaseException: - #~ print ("Exception raised:", err) - err = None - if self.debugActions[2]: - err = sys.exc_info()[1] - self.debugActions[2]( instring, tokensStart, self, err ) - if self.failAction: - if err is None: - err = sys.exc_info()[1] - self.failAction( instring, tokensStart, self, err ) - raise - else: - if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = preloc - if self.mayIndexError or loc >= len(instring): - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) - else: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - - tokens = self.postParse( instring, loc, tokens ) - - retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults ) - if self.parseAction and (doActions or self.callDuringTry): - if debugging: - try: - for fn in self.parseAction: - tokens = fn( instring, tokensStart, retTokens ) - if tokens is not None: - retTokens = ParseResults( tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - except ParseBaseException: - #~ print "Exception raised in user parse action:", err - if (self.debugActions[2] ): - err = sys.exc_info()[1] - self.debugActions[2]( instring, tokensStart, self, err ) - raise - else: - for fn in self.parseAction: - tokens = fn( instring, tokensStart, retTokens ) - if tokens is not None: - retTokens = ParseResults( tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - - if debugging: - #~ print ("Matched",self,"->",retTokens.asList()) - if (self.debugActions[1] ): - 
self.debugActions[1]( instring, tokensStart, loc, self, retTokens ) - - return loc, retTokens - - def tryParse( self, instring, loc ): - try: - return self._parse( instring, loc, doActions=False )[0] - except ParseFatalException: - raise ParseException( instring, loc, self.errmsg, self) - - # this method gets repeatedly called during backtracking with the same arguments - - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): - lookup = (self,instring,loc,callPreParse,doActions) - if lookup in ParserElement._exprArgCache: - value = ParserElement._exprArgCache[ lookup ] - if isinstance(value, Exception): - raise value - return (value[0],value[1].copy()) - else: - try: - value = self._parseNoCache( instring, loc, doActions, callPreParse ) - ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy()) - return value - except ParseBaseException: - pe = sys.exc_info()[1] - ParserElement._exprArgCache[ lookup ] = pe - raise - - _parse = _parseNoCache - - # argument cache for optimizing repeated calls when backtracking through recursive expressions - _exprArgCache = {} - def resetCache(): - ParserElement._exprArgCache.clear() - resetCache = staticmethod(resetCache) - - _packratEnabled = False - def enablePackrat(): - """Enables "packrat" parsing, which adds memoizing to the parsing logic. - Repeated parse attempts at the same string location (which happens - often in many complex grammars) can immediately return a cached value, - instead of re-executing parsing/validating code. Memoizing is done of - both valid results and parsing exceptions. - - This speedup may break existing programs that use parse actions that - have side-effects. For this reason, packrat parsing is disabled when - you first import pyparsing. To activate the packrat feature, your - program must call the class method C{ParserElement.enablePackrat()}. If - your program uses C{psyco} to "compile as you go", you must call - C{enablePackrat} before calling C{psyco.full()}. If you do not do this, - Python will crash. For best results, call C{enablePackrat()} immediately - after importing pyparsing. - """ - if not ParserElement._packratEnabled: - ParserElement._packratEnabled = True - ParserElement._parse = ParserElement._parseCache - enablePackrat = staticmethod(enablePackrat) - - def parseString( self, instring, parseAll=False ): - """Execute the parse expression with the given string. - This is the main interface to the client code, once the complete - expression has been built. - - If you want the grammar to require that the entire input string be - successfully parsed, then set C{parseAll} to True (equivalent to ending - the grammar with C{StringEnd()}). - - Note: C{parseString} implicitly calls C{expandtabs()} on the input string, - in order to report proper column numbers in parse actions. 
- If the input string contains tabs and - the grammar uses parse actions that use the C{loc} argument to index into the - string being parsed, you can ensure you have a consistent view of the input - string by: - - calling C{parseWithTabs} on your grammar before calling C{parseString} - (see L{I{parseWithTabs}}) - - define your parse action using the full C{(s,loc,toks)} signature, and - reference the input string using the parse action's C{s} argument - - explictly expand the tabs in your input string before calling - C{parseString} - """ - ParserElement.resetCache() - if not self.streamlined: - self.streamline() - #~ self.saveAsList = True - for e in self.ignoreExprs: - e.streamline() - if not self.keepTabs: - instring = instring.expandtabs() - try: - loc, tokens = self._parse( instring, 0 ) - if parseAll: - loc = self.preParse( instring, loc ) - se = Empty() + StringEnd() - se._parse( instring, loc ) - except ParseBaseException: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - exc = sys.exc_info()[1] - raise exc - else: - return tokens - - def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): - """Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - C{maxMatches} argument, to clip scanning after 'n' matches are found. If - C{overlap} is specified, then overlapping matches will be reported. - - Note that the start and end locations are reported relative to the string - being parsed. See L{I{parseString}} for more information on parsing - strings with embedded tabs.""" - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - - if not self.keepTabs: - instring = _ustr(instring).expandtabs() - instrlen = len(instring) - loc = 0 - preparseFn = self.preParse - parseFn = self._parse - ParserElement.resetCache() - matches = 0 - try: - while loc <= instrlen and matches < maxMatches: - try: - preloc = preparseFn( instring, loc ) - nextLoc,tokens = parseFn( instring, preloc, callPreParse=False ) - except ParseException: - loc = preloc+1 - else: - if nextLoc > loc: - matches += 1 - yield tokens, preloc, nextLoc - if overlap: - nextloc = preparseFn( instring, loc ) - if nextloc > loc: - loc = nextLoc - else: - loc += 1 - else: - loc = nextLoc - else: - loc = preloc+1 - except ParseBaseException: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - exc = sys.exc_info()[1] - raise exc - - def transformString( self, instring ): - """Extension to C{scanString}, to modify matching text with modified tokens that may - be returned from a parse action. To use C{transformString}, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking C{transformString()} on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. 
C{transformString()} returns the resulting transformed string.""" - out = [] - lastE = 0 - # force preservation of s, to minimize unwanted transformation of string, and to - # keep string locs straight between transformString and scanString - self.keepTabs = True - try: - for t,s,e in self.scanString( instring ): - out.append( instring[lastE:s] ) - if t: - if isinstance(t,ParseResults): - out += t.asList() - elif isinstance(t,list): - out += t - else: - out.append(t) - lastE = e - out.append(instring[lastE:]) - out = [o for o in out if o] - return "".join(map(_ustr,_flatten(out))) - except ParseBaseException: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - exc = sys.exc_info()[1] - raise exc - - def searchString( self, instring, maxMatches=_MAX_INT ): - """Another extension to C{scanString}, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - C{maxMatches} argument, to clip searching after 'n' matches are found. - """ - try: - return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) - except ParseBaseException: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - exc = sys.exc_info()[1] - raise exc - - def __add__(self, other ): - """Implementation of + operator - returns And""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return And( [ self, other ] ) - - def __radd__(self, other ): - """Implementation of + operator when left operand is not a C{ParserElement}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other + self - - def __sub__(self, other): - """Implementation of - operator, returns C{And} with error stop""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return And( [ self, And._ErrorStop(), other ] ) - - def __rsub__(self, other ): - """Implementation of - operator when left operand is not a C{ParserElement}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other - self - - def __mul__(self,other): - """Implementation of * operator, allows use of C{expr * 3} in place of - C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer - tuple, similar to C{{min,max}} multipliers in regular expressions. 
Tuples - may also include C{None} as in: - - C{expr*(n,None)} or C{expr*(n,)} is equivalent - to C{expr*n + ZeroOrMore(expr)} - (read as "at least n instances of C{expr}") - - C{expr*(None,n)} is equivalent to C{expr*(0,n)} - (read as "0 to n instances of C{expr}") - - C{expr*(None,None)} is equivalent to C{ZeroOrMore(expr)} - - C{expr*(1,None)} is equivalent to C{OneOrMore(expr)} - - Note that C{expr*(None,n)} does not raise an exception if - more than n exprs exist in the input stream; that is, - C{expr*(None,n)} does not enforce a maximum number of expr - occurrences. If this behavior is desired, then write - C{expr*(None,n) + ~expr} - - """ - if isinstance(other,int): - minElements, optElements = other,0 - elif isinstance(other,tuple): - other = (other + (None, None))[:2] - if other[0] is None: - other = (0, other[1]) - if isinstance(other[0],int) and other[1] is None: - if other[0] == 0: - return ZeroOrMore(self) - if other[0] == 1: - return OneOrMore(self) - else: - return self*other[0] + ZeroOrMore(self) - elif isinstance(other[0],int) and isinstance(other[1],int): - minElements, optElements = other - optElements -= minElements - else: - raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1])) - else: - raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) - - if minElements < 0: - raise ValueError("cannot multiply ParserElement by negative value") - if optElements < 0: - raise ValueError("second tuple value must be greater or equal to first tuple value") - if minElements == optElements == 0: - raise ValueError("cannot multiply ParserElement by 0 or (0,0)") - - if (optElements): - def makeOptionalList(n): - if n>1: - return Optional(self + makeOptionalList(n-1)) - else: - return Optional(self) - if minElements: - if minElements == 1: - ret = self + makeOptionalList(optElements) - else: - ret = And([self]*minElements) + makeOptionalList(optElements) - else: - ret = makeOptionalList(optElements) - else: - if minElements == 1: - ret = self - else: - ret = And([self]*minElements) - return ret - - def __rmul__(self, other): - return self.__mul__(other) - - def __or__(self, other ): - """Implementation of | operator - returns C{MatchFirst}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return MatchFirst( [ self, other ] ) - - def __ror__(self, other ): - """Implementation of | operator when left operand is not a C{ParserElement}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other | self - - def __xor__(self, other ): - """Implementation of ^ operator - returns C{Or}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Or( [ self, other ] ) - - def __rxor__(self, other ): - """Implementation of ^ operator when left operand is not a C{ParserElement}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with 
ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other ^ self - - def __and__(self, other ): - """Implementation of & operator - returns C{Each}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Each( [ self, other ] ) - - def __rand__(self, other ): - """Implementation of & operator when left operand is not a C{ParserElement}""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other & self - - def __invert__( self ): - """Implementation of ~ operator - returns C{NotAny}""" - return NotAny( self ) - - def __call__(self, name): - """Shortcut for C{setResultsName}, with C{listAllMatches=default}:: - userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") - could be written as:: - userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") - - If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be - passed as C{True}. - """ - if not name.endswith("*"): - return self.setResultsName(name) - else: - return self.setResultsName(name[:-1], listAllMatches=True) - - def suppress( self ): - """Suppresses the output of this C{ParserElement}; useful to keep punctuation from - cluttering up returned output. - """ - return Suppress( self ) - - def leaveWhitespace( self ): - """Disables the skipping of whitespace before matching the characters in the - C{ParserElement}'s defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. - """ - self.skipWhitespace = False - return self - - def setWhitespaceChars( self, chars ): - """Overrides the default whitespace chars - """ - self.skipWhitespace = True - self.whiteChars = chars - self.copyDefaultWhiteChars = False - return self - - def parseWithTabs( self ): - """Overrides default behavior to expand C{}s to spaces before parsing the input string. - Must be called before C{parseString} when the input grammar contains elements that - match C{} characters.""" - self.keepTabs = True - return self - - def ignore( self, other ): - """Define expression to be ignored (e.g., comments) while doing pattern - matching; may be called repeatedly, to define multiple comment or other - ignorable patterns. - """ - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - self.ignoreExprs.append( other.copy() ) - else: - self.ignoreExprs.append( Suppress( other.copy() ) ) - return self - - def setDebugActions( self, startAction, successAction, exceptionAction ): - """Enable display of debugging messages while doing pattern matching.""" - self.debugActions = (startAction or _defaultStartDebugAction, - successAction or _defaultSuccessDebugAction, - exceptionAction or _defaultExceptionDebugAction) - self.debug = True - return self - - def setDebug( self, flag=True ): - """Enable display of debugging messages while doing pattern matching. 
- Set C{flag} to True to enable, False to disable.""" - if flag: - self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) - else: - self.debug = False - return self - - def __str__( self ): - return self.name - - def __repr__( self ): - return _ustr(self) - - def streamline( self ): - self.streamlined = True - self.strRepr = None - return self - - def checkRecursion( self, parseElementList ): - pass - - def validate( self, validateTrace=[] ): - """Check defined expressions for valid structure, check for infinite recursive definitions.""" - self.checkRecursion( [] ) - - def parseFile( self, file_or_filename, parseAll=False ): - """Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. - """ - try: - file_contents = file_or_filename.read() - except AttributeError: - f = open(file_or_filename, "rb") - file_contents = f.read() - f.close() - try: - return self.parseString(file_contents, parseAll) - except ParseBaseException: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - exc = sys.exc_info()[1] - raise exc - - def getException(self): - return ParseException("",0,self.errmsg,self) - - def __getattr__(self,aname): - if aname == "myException": - self.myException = ret = self.getException(); - return ret; - else: - raise AttributeError("no such attribute " + aname) - - def __eq__(self,other): - if isinstance(other, ParserElement): - return self is other or self.__dict__ == other.__dict__ - elif isinstance(other, basestring): - try: - self.parseString(_ustr(other), parseAll=True) - return True - except ParseBaseException: - return False - else: - return super(ParserElement,self)==other - - def __ne__(self,other): - return not (self == other) - - def __hash__(self): - return hash(id(self)) - - def __req__(self,other): - return self == other - - def __rne__(self,other): - return not (self == other) - - -class Token(ParserElement): - """Abstract C{ParserElement} subclass, for defining atomic matching patterns.""" - def __init__( self ): - super(Token,self).__init__( savelist=False ) - #self.myException = ParseException("",0,"",self) - - def setName(self, name): - s = super(Token,self).setName(name) - self.errmsg = "Expected " + self.name - #s.myException.msg = self.errmsg - return s - - -class Empty(Token): - """An empty token, will always match.""" - def __init__( self ): - super(Empty,self).__init__() - self.name = "Empty" - self.mayReturnEmpty = True - self.mayIndexError = False - - -class NoMatch(Token): - """A token that will never match.""" - def __init__( self ): - super(NoMatch,self).__init__() - self.name = "NoMatch" - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - #self.myException.msg = self.errmsg - - def parseImpl( self, instring, loc, doActions=True ): - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - -class Literal(Token): - """Token to exactly match a specified string.""" - def __init__( self, matchString ): - super(Literal,self).__init__() - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Literal; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.__class__ = Empty - self.name = '"%s"' % _ustr(self.match) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False 
- #self.myException.msg = self.errmsg - self.mayIndexError = False - - # Performance tuning: this routine gets called a *lot* - # if this is a single character match string and the first character matches, - # short-circuit as quickly as possible, and avoid calling startswith - #~ @profile - def parseImpl( self, instring, loc, doActions=True ): - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) ): - return loc+self.matchLen, self.match - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc -_L = Literal - -class Keyword(Token): - """Token to exactly match a specified string as a keyword, that is, it must be - immediately followed by a non-keyword character. Compare with C{Literal}:: - Literal("if") will match the leading C{'if'} in C{'ifAndOnlyIf'}. - Keyword("if") will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'} - Accepts two optional constructor arguments in addition to the keyword string: - C{identChars} is a string of characters that would be valid identifier characters, - defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive - matching, default is C{False}. - """ - DEFAULT_KEYWORD_CHARS = alphanums+"_$" - - def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ): - super(Keyword,self).__init__() - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Keyword; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.name = '"%s"' % self.match - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - #self.myException.msg = self.errmsg - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = matchString.upper() - identChars = identChars.upper() - self.identChars = set(identChars) - - def parseImpl( self, instring, loc, doActions=True ): - if self.caseless: - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and - (loc == 0 or instring[loc-1].upper() not in self.identChars) ): - return loc+self.matchLen, self.match - else: - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and - (loc == 0 or instring[loc-1] not in self.identChars) ): - return loc+self.matchLen, self.match - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - def copy(self): - c = super(Keyword,self).copy() - c.identChars = Keyword.DEFAULT_KEYWORD_CHARS - return c - - def setDefaultKeywordChars( chars ): - """Overrides the default Keyword chars - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - setDefaultKeywordChars = staticmethod(setDefaultKeywordChars) - -class CaselessLiteral(Literal): - """Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. - """ - def __init__( self, matchString ): - super(CaselessLiteral,self).__init__( matchString.upper() ) - # Preserve the defining literal. 
- self.returnString = matchString - self.name = "'%s'" % self.returnString - self.errmsg = "Expected " + self.name - #self.myException.msg = self.errmsg - - def parseImpl( self, instring, loc, doActions=True ): - if instring[ loc:loc+self.matchLen ].upper() == self.match: - return loc+self.matchLen, self.returnString - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - -class CaselessKeyword(Keyword): - def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ): - super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) - - def parseImpl( self, instring, loc, doActions=True ): - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ): - return loc+self.matchLen, self.match - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - -class Word(Token): - """Token for matching words composed of allowed character sets. - Defined with string containing all allowed initial characters, - an optional string containing allowed body characters (if omitted, - defaults to the initial character set), and an optional minimum, - maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. - """ - def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ): - super(Word,self).__init__() - self.initCharsOrig = initChars - self.initChars = set(initChars) - if bodyChars : - self.bodyCharsOrig = bodyChars - self.bodyChars = set(bodyChars) - else: - self.bodyCharsOrig = initChars - self.bodyChars = set(initChars) - - self.maxSpecified = max > 0 - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - #self.myException.msg = self.errmsg - self.mayIndexError = False - self.asKeyword = asKeyword - - if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0): - if self.bodyCharsOrig == self.initCharsOrig: - self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) - elif len(self.bodyCharsOrig) == 1: - self.reString = "%s[%s]*" % \ - (re.escape(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - else: - self.reString = "[%s][%s]*" % \ - (_escapeRegexRangeChars(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - if self.asKeyword: - self.reString = r"\b"+self.reString+r"\b" - try: - self.re = re.compile( self.reString ) - except: - self.re = None - - def parseImpl( self, instring, loc, doActions=True ): - if self.re: - result = self.re.match(instring,loc) - if not result: - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - loc = result.end() - return loc, result.group() - - if not(instring[ loc ] in self.initChars): - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - start = loc - loc += 1 - instrlen = len(instring) - bodychars = self.bodyChars - maxloc = start + 
self.maxLen - maxloc = min( maxloc, instrlen ) - while loc < maxloc and instring[loc] in bodychars: - loc += 1 - - throwException = False - if loc - start < self.minLen: - throwException = True - if self.maxSpecified and loc < instrlen and instring[loc] in bodychars: - throwException = True - if self.asKeyword: - if (start>0 and instring[start-1] in bodychars) or (loc4: - return s[:4]+"..." - else: - return s - - if ( self.initCharsOrig != self.bodyCharsOrig ): - self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) ) - else: - self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) - - return self.strRepr - - -class Regex(Token): - """Token for matching strings that match a given regular expression. - Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. - """ - compiledREtype = type(re.compile("[A-Z]")) - def __init__( self, pattern, flags=0): - """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags.""" - super(Regex,self).__init__() - - if isinstance(pattern, basestring): - if len(pattern) == 0: - warnings.warn("null string passed to Regex; use Empty() instead", - SyntaxWarning, stacklevel=2) - - self.pattern = pattern - self.flags = flags - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % pattern, - SyntaxWarning, stacklevel=2) - raise - - elif isinstance(pattern, Regex.compiledREtype): - self.re = pattern - self.pattern = \ - self.reString = str(pattern) - self.flags = flags - - else: - raise ValueError("Regex may only be constructed with a string or a compiled RE object") - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - #self.myException.msg = self.errmsg - self.mayIndexError = False - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - result = self.re.match(instring,loc) - if not result: - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - loc = result.end() - d = result.groupdict() - ret = ParseResults(result.group()) - if d: - for k in d: - ret[k] = d[k] - return loc,ret - - def __str__( self ): - try: - return super(Regex,self).__str__() - except: - pass - - if self.strRepr is None: - self.strRepr = "Re:(%s)" % repr(self.pattern) - - return self.strRepr - - -class QuotedString(Token): - """Token for matching strings that are delimited by quoting characters. 
- """ - def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None): - """ - Defined with the following parameters: - - quoteChar - string of one or more characters defining the quote delimiting string - - escChar - character to escape quotes, typically backslash (default=None) - - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None) - - multiline - boolean indicating whether quotes can span multiple lines (default=False) - - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True) - - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar) - """ - super(QuotedString,self).__init__() - - # remove white space from quote chars - wont work anyway - quoteChar = quoteChar.strip() - if len(quoteChar) == 0: - warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) - raise SyntaxError() - - if endQuoteChar is None: - endQuoteChar = quoteChar - else: - endQuoteChar = endQuoteChar.strip() - if len(endQuoteChar) == 0: - warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) - raise SyntaxError() - - self.quoteChar = quoteChar - self.quoteCharLen = len(quoteChar) - self.firstQuoteChar = quoteChar[0] - self.endQuoteChar = endQuoteChar - self.endQuoteCharLen = len(endQuoteChar) - self.escChar = escChar - self.escQuote = escQuote - self.unquoteResults = unquoteResults - - if multiline: - self.flags = re.MULTILINE | re.DOTALL - self.pattern = r'%s(?:[^%s%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) - else: - self.flags = 0 - self.pattern = r'%s(?:[^%s\n\r%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) - if len(self.endQuoteChar) > 1: - self.pattern += ( - '|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]), - _escapeRegexRangeChars(self.endQuoteChar[i])) - for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')' - ) - if escQuote: - self.pattern += (r'|(?:%s)' % re.escape(escQuote)) - if escChar: - self.pattern += (r'|(?:%s.)' % re.escape(escChar)) - charset = ''.join(set(self.quoteChar[0]+self.endQuoteChar[0])).replace('^',r'\^').replace('-',r'\-') - self.escCharReplacePattern = re.escape(self.escChar)+("([%s])" % charset) - self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern, - SyntaxWarning, stacklevel=2) - raise - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - #self.myException.msg = self.errmsg - self.mayIndexError = False - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None - if not result: - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - loc = result.end() - ret = result.group() - - if self.unquoteResults: - - # strip off quotes - ret = ret[self.quoteCharLen:-self.endQuoteCharLen] - - if isinstance(ret,basestring): - # replace escaped characters - if self.escChar: - ret = re.sub(self.escCharReplacePattern,"\g<1>",ret) 
- - # replace escaped quotes - if self.escQuote: - ret = ret.replace(self.escQuote, self.endQuoteChar) - - return loc, ret - - def __str__( self ): - try: - return super(QuotedString,self).__str__() - except: - pass - - if self.strRepr is None: - self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar) - - return self.strRepr - - -class CharsNotIn(Token): - """Token for matching words composed of characters *not* in a given set. - Defined with string containing all disallowed characters, and an optional - minimum, maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. - """ - def __init__( self, notChars, min=1, max=0, exact=0 ): - super(CharsNotIn,self).__init__() - self.skipWhitespace = False - self.notChars = notChars - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = ( self.minLen == 0 ) - #self.myException.msg = self.errmsg - self.mayIndexError = False - - def parseImpl( self, instring, loc, doActions=True ): - if instring[loc] in self.notChars: - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - start = loc - loc += 1 - notchars = self.notChars - maxlen = min( start+self.maxLen, len(instring) ) - while loc < maxlen and \ - (instring[loc] not in notchars): - loc += 1 - - if loc - start < self.minLen: - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - return loc, instring[start:loc] - - def __str__( self ): - try: - return super(CharsNotIn, self).__str__() - except: - pass - - if self.strRepr is None: - if len(self.notChars) > 4: - self.strRepr = "!W:(%s...)" % self.notChars[:4] - else: - self.strRepr = "!W:(%s)" % self.notChars - - return self.strRepr - -class White(Token): - """Special matching class for matching whitespace. Normally, whitespace is ignored - by pyparsing grammars. This class is included when some whitespace structures - are significant. Define with a string containing the whitespace characters to be - matched; default is C{" \\t\\r\\n"}. 
Also takes optional C{min}, C{max}, and C{exact} arguments, - as defined for the C{Word} class.""" - whiteStrs = { - " " : "", - "\t": "", - "\n": "", - "\r": "", - "\f": "", - } - def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): - super(White,self).__init__() - self.matchWhite = ws - self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) ) - #~ self.leaveWhitespace() - self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite])) - self.mayReturnEmpty = True - self.errmsg = "Expected " + self.name - #self.myException.msg = self.errmsg - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - def parseImpl( self, instring, loc, doActions=True ): - if not(instring[ loc ] in self.matchWhite): - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - start = loc - loc += 1 - maxloc = start + self.maxLen - maxloc = min( maxloc, len(instring) ) - while loc < maxloc and instring[loc] in self.matchWhite: - loc += 1 - - if loc - start < self.minLen: - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - return loc, instring[start:loc] - - -class _PositionToken(Token): - def __init__( self ): - super(_PositionToken,self).__init__() - self.name=self.__class__.__name__ - self.mayReturnEmpty = True - self.mayIndexError = False - -class GoToColumn(_PositionToken): - """Token to advance to a specific column of input text; useful for tabular report scraping.""" - def __init__( self, colno ): - super(GoToColumn,self).__init__() - self.col = colno - - def preParse( self, instring, loc ): - if col(loc,instring) != self.col: - instrlen = len(instring) - if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) - while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col : - loc += 1 - return loc - - def parseImpl( self, instring, loc, doActions=True ): - thiscol = col( loc, instring ) - if thiscol > self.col: - raise ParseException( instring, loc, "Text not in expected column", self ) - newloc = loc + self.col - thiscol - ret = instring[ loc: newloc ] - return newloc, ret - -class LineStart(_PositionToken): - """Matches if current position is at the beginning of a line within the parse string""" - def __init__( self ): - super(LineStart,self).__init__() - self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) - self.errmsg = "Expected start of line" - #self.myException.msg = self.errmsg - - def preParse( self, instring, loc ): - preloc = super(LineStart,self).preParse(instring,loc) - if instring[preloc] == "\n": - loc += 1 - return loc - - def parseImpl( self, instring, loc, doActions=True ): - if not( loc==0 or - (loc == self.preParse( instring, 0 )) or - (instring[loc-1] == "\n") ): #col(loc, instring) != 1: - #~ raise ParseException( instring, loc, "Expected start of line" ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - return loc, [] - -class LineEnd(_PositionToken): - """Matches if current position is at the end of a line within the parse string""" - def __init__( self ): - super(LineEnd,self).__init__() - self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) - self.errmsg = "Expected end of line" - #self.myException.msg = self.errmsg - - def parseImpl( self, instring, loc, doActions=True ): 
- if loc len(instring): - return loc, [] - else: - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - -class WordStart(_PositionToken): - """Matches if the current position is at the beginning of a Word, and - is not preceded by any character in a given set of C{wordChars} - (default=C{printables}). To emulate the C{\b} behavior of regular expressions, - use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of - the string being parsed, or at the beginning of a line. - """ - def __init__(self, wordChars = printables): - super(WordStart,self).__init__() - self.wordChars = set(wordChars) - self.errmsg = "Not at the start of a word" - - def parseImpl(self, instring, loc, doActions=True ): - if loc != 0: - if (instring[loc-1] in self.wordChars or - instring[loc] not in self.wordChars): - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - return loc, [] - -class WordEnd(_PositionToken): - """Matches if the current position is at the end of a Word, and - is not followed by any character in a given set of C{wordChars} - (default=C{printables}). To emulate the C{\b} behavior of regular expressions, - use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of - the string being parsed, or at the end of a line. - """ - def __init__(self, wordChars = printables): - super(WordEnd,self).__init__() - self.wordChars = set(wordChars) - self.skipWhitespace = False - self.errmsg = "Not at the end of a word" - - def parseImpl(self, instring, loc, doActions=True ): - instrlen = len(instring) - if instrlen>0 and loc maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) - maxExcLoc = len(instring) - else: - if loc2 > maxMatchLoc: - maxMatchLoc = loc2 - maxMatchExp = e - - if maxMatchLoc < 0: - if maxException is not None: - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - return maxMatchExp._parse( instring, loc, doActions ) - - def __ixor__(self, other ): - if isinstance( other, basestring ): - other = Literal( other ) - return self.append( other ) #Or( [ self, other ] ) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class MatchFirst(ParseExpression): - """Requires that at least one C{ParseExpression} is found. - If two expressions match, the first one listed is the one that will match. - May be constructed using the C{'|'} operator. 
- """ - def __init__( self, exprs, savelist = False ): - super(MatchFirst,self).__init__(exprs, savelist) - if exprs: - self.mayReturnEmpty = False - for e in self.exprs: - if e.mayReturnEmpty: - self.mayReturnEmpty = True - break - else: - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - maxExcLoc = -1 - maxException = None - for e in self.exprs: - try: - ret = e._parse( instring, loc, doActions ) - return ret - except ParseException, err: - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) - maxExcLoc = len(instring) - - # only got here if no expression matched, raise exception for match that made it the furthest - else: - if maxException is not None: - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - def __ior__(self, other ): - if isinstance( other, basestring ): - other = Literal( other ) - return self.append( other ) #MatchFirst( [ self, other ] ) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class Each(ParseExpression): - """Requires all given C{ParseExpression}s to be found, but in any order. - Expressions may be separated by whitespace. - May be constructed using the C{'&'} operator. - """ - def __init__( self, exprs, savelist = True ): - super(Each,self).__init__(exprs, savelist) - self.mayReturnEmpty = True - for e in self.exprs: - if not e.mayReturnEmpty: - self.mayReturnEmpty = False - break - self.skipWhitespace = True - self.initExprGroups = True - - def parseImpl( self, instring, loc, doActions=True ): - if self.initExprGroups: - opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ] - opt2 = [ e for e in self.exprs if e.mayReturnEmpty and e not in opt1 ] - self.optionals = opt1 + opt2 - self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ] - self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ] - self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ] - self.required += self.multirequired - self.initExprGroups = False - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - matchOrder = [] - - keepMatching = True - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired - failed = [] - for e in tmpExprs: - try: - tmpLoc = e.tryParse( instring, tmpLoc ) - except ParseException: - failed.append(e) - else: - matchOrder.append(e) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - if tmpReqd: - missing = ", ".join( [ _ustr(e) for e in tmpReqd ] ) - raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing ) - - # add any unmatched Optionals, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt] - - resultlist = [] - for e in matchOrder: - loc,results = e._parse(instring,loc,doActions) - resultlist.append(results) - - finalResults = ParseResults([]) - for r in resultlist: - dups = {} - 
for k in r.keys(): - if k in finalResults.keys(): - tmp = ParseResults(finalResults[k]) - tmp += ParseResults(r[k]) - dups[k] = tmp - finalResults += ParseResults(r) - for k,v in dups.items(): - finalResults[k] = v - return loc, finalResults - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class ParseElementEnhance(ParserElement): - """Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.""" - def __init__( self, expr, savelist=False ): - super(ParseElementEnhance,self).__init__(savelist) - if isinstance( expr, basestring ): - expr = Literal(expr) - self.expr = expr - self.strRepr = None - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.setWhitespaceChars( expr.whiteChars ) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def parseImpl( self, instring, loc, doActions=True ): - if self.expr is not None: - return self.expr._parse( instring, loc, doActions, callPreParse=False ) - else: - raise ParseException("",loc,self.errmsg,self) - - def leaveWhitespace( self ): - self.skipWhitespace = False - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.leaveWhitespace() - return self - - def ignore( self, other ): - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - super( ParseElementEnhance, self).ignore( other ) - if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) - else: - super( ParseElementEnhance, self).ignore( other ) - if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) - return self - - def streamline( self ): - super(ParseElementEnhance,self).streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def checkRecursion( self, parseElementList ): - if self in parseElementList: - raise RecursiveGrammarException( parseElementList+[self] ) - subRecCheckList = parseElementList[:] + [ self ] - if self.expr is not None: - self.expr.checkRecursion( subRecCheckList ) - - def validate( self, validateTrace=[] ): - tmp = validateTrace[:]+[self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion( [] ) - - def __str__( self ): - try: - return super(ParseElementEnhance,self).__str__() - except: - pass - - if self.strRepr is None and self.expr is not None: - self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) ) - return self.strRepr - - -class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. C{FollowedBy} - does *not* advance the parsing position within the input string, it only - verifies that the specified parse expression matches at the current - position. C{FollowedBy} always returns a null token list.""" - def __init__( self, expr ): - super(FollowedBy,self).__init__(expr) - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - self.expr.tryParse( instring, loc ) - return loc, [] - - -class NotAny(ParseElementEnhance): - """Lookahead to disallow matching with the given parse expression. 
C{NotAny} - does *not* advance the parsing position within the input string, it only - verifies that the specified parse expression does *not* match at the current - position. Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny} - always returns a null token list. May be constructed using the '~' operator.""" - def __init__( self, expr ): - super(NotAny,self).__init__(expr) - #~ self.leaveWhitespace() - self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs - self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, "+_ustr(self.expr) - #self.myException = ParseException("",0,self.errmsg,self) - - def parseImpl( self, instring, loc, doActions=True ): - try: - self.expr.tryParse( instring, loc ) - except (ParseException,IndexError): - pass - else: - #~ raise ParseException(instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - return loc, [] - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "~{" + _ustr(self.expr) + "}" - - return self.strRepr - - -class ZeroOrMore(ParseElementEnhance): - """Optional repetition of zero or more of the given expression.""" - def __init__( self, expr ): - super(ZeroOrMore,self).__init__(expr) - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - tokens = [] - try: - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - hasIgnoreExprs = ( len(self.ignoreExprs) > 0 ) - while 1: - if hasIgnoreExprs: - preloc = self._skipIgnorables( instring, loc ) - else: - preloc = loc - loc, tmptokens = self.expr._parse( instring, preloc, doActions ) - if tmptokens or tmptokens.keys(): - tokens += tmptokens - except (ParseException,IndexError): - pass - - return loc, tokens - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]..." - - return self.strRepr - - def setResultsName( self, name, listAllMatches=False ): - ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches) - ret.saveAsList = True - return ret - - -class OneOrMore(ParseElementEnhance): - """Repetition of one or more of the given expression.""" - def parseImpl( self, instring, loc, doActions=True ): - # must be at least one - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - try: - hasIgnoreExprs = ( len(self.ignoreExprs) > 0 ) - while 1: - if hasIgnoreExprs: - preloc = self._skipIgnorables( instring, loc ) - else: - preloc = loc - loc, tmptokens = self.expr._parse( instring, preloc, doActions ) - if tmptokens or tmptokens.keys(): - tokens += tmptokens - except (ParseException,IndexError): - pass - - return loc, tokens - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + _ustr(self.expr) + "}..." - - return self.strRepr - - def setResultsName( self, name, listAllMatches=False ): - ret = super(OneOrMore,self).setResultsName(name,listAllMatches) - ret.saveAsList = True - return ret - -class _NullToken(object): - def __bool__(self): - return False - __nonzero__ = __bool__ - def __str__(self): - return "" - -_optionalNotMatched = _NullToken() -class Optional(ParseElementEnhance): - """Optional matching of the given expression. - A default return string can also be specified, if the optional expression - is not found. 
- """ - def __init__( self, exprs, default=_optionalNotMatched ): - super(Optional,self).__init__( exprs, savelist=False ) - self.defaultValue = default - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - try: - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - except (ParseException,IndexError): - if self.defaultValue is not _optionalNotMatched: - if self.expr.resultsName: - tokens = ParseResults([ self.defaultValue ]) - tokens[self.expr.resultsName] = self.defaultValue - else: - tokens = [ self.defaultValue ] - else: - tokens = [] - return loc, tokens - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]" - - return self.strRepr - - -class SkipTo(ParseElementEnhance): - """Token for skipping over all undefined text until the matched expression is found. - If C{include} is set to true, the matched expression is also parsed (the skipped text - and matched expression are returned as a 2-element list). The C{ignore} - argument is used to define grammars (typically quoted strings and comments) that - might contain false matches. - """ - def __init__( self, other, include=False, ignore=None, failOn=None ): - super( SkipTo, self ).__init__( other ) - self.ignoreExpr = ignore - self.mayReturnEmpty = True - self.mayIndexError = False - self.includeMatch = include - self.asList = False - if failOn is not None and isinstance(failOn, basestring): - self.failOn = Literal(failOn) - else: - self.failOn = failOn - self.errmsg = "No match found for "+_ustr(self.expr) - #self.myException = ParseException("",0,self.errmsg,self) - - def parseImpl( self, instring, loc, doActions=True ): - startLoc = loc - instrlen = len(instring) - expr = self.expr - failParse = False - while loc <= instrlen: - try: - if self.failOn: - try: - self.failOn.tryParse(instring, loc) - except ParseBaseException: - pass - else: - failParse = True - raise ParseException(instring, loc, "Found expression " + str(self.failOn)) - failParse = False - if self.ignoreExpr is not None: - while 1: - try: - loc = self.ignoreExpr.tryParse(instring,loc) - # print "found ignoreExpr, advance to", loc - except ParseBaseException: - break - expr._parse( instring, loc, doActions=False, callPreParse=False ) - skipText = instring[startLoc:loc] - if self.includeMatch: - loc,mat = expr._parse(instring,loc,doActions,callPreParse=False) - if mat: - skipRes = ParseResults( skipText ) - skipRes += mat - return loc, [ skipRes ] - else: - return loc, [ skipText ] - else: - return loc, [ skipText ] - except (ParseException,IndexError): - if failParse: - raise - else: - loc += 1 - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - -class Forward(ParseElementEnhance): - """Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator. - - Note: take care when assigning to C{Forward} not to overlook precedence of operators. - Specifically, '|' has a lower precedence than '<<', so that:: - fwdExpr << a | b | c - will actually be evaluated as:: - (fwdExpr << a) | b | c - thereby leaving b and c out as parseable alternatives. 
It is recommended that you - explicitly group the values inserted into the C{Forward}:: - fwdExpr << (a | b | c) - """ - def __init__( self, other=None ): - super(Forward,self).__init__( other, savelist=False ) - - def __lshift__( self, other ): - if isinstance( other, basestring ): - other = Literal(other) - self.expr = other - self.mayReturnEmpty = other.mayReturnEmpty - self.strRepr = None - self.mayIndexError = self.expr.mayIndexError - self.mayReturnEmpty = self.expr.mayReturnEmpty - self.setWhitespaceChars( self.expr.whiteChars ) - self.skipWhitespace = self.expr.skipWhitespace - self.saveAsList = self.expr.saveAsList - self.ignoreExprs.extend(self.expr.ignoreExprs) - return None - - def leaveWhitespace( self ): - self.skipWhitespace = False - return self - - def streamline( self ): - if not self.streamlined: - self.streamlined = True - if self.expr is not None: - self.expr.streamline() - return self - - def validate( self, validateTrace=[] ): - if self not in validateTrace: - tmp = validateTrace[:]+[self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion([]) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - self._revertClass = self.__class__ - self.__class__ = _ForwardNoRecurse - try: - if self.expr is not None: - retString = _ustr(self.expr) - else: - retString = "None" - finally: - self.__class__ = self._revertClass - return self.__class__.__name__ + ": " + retString - - def copy(self): - if self.expr is not None: - return super(Forward,self).copy() - else: - ret = Forward() - ret << self - return ret - -class _ForwardNoRecurse(Forward): - def __str__( self ): - return "..." - -class TokenConverter(ParseElementEnhance): - """Abstract subclass of C{ParseExpression}, for converting parsed results.""" - def __init__( self, expr, savelist=False ): - super(TokenConverter,self).__init__( expr )#, savelist ) - self.saveAsList = False - -class Upcase(TokenConverter): - """Converter to upper case all matching tokens.""" - def __init__(self, *args): - super(Upcase,self).__init__(*args) - warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead", - DeprecationWarning,stacklevel=2) - - def postParse( self, instring, loc, tokenlist ): - return list(map( string.upper, tokenlist )) - - -class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the input string; - this can be disabled by specifying C{'adjacent=False'} in the constructor. 
- """ - def __init__( self, expr, joinString="", adjacent=True ): - super(Combine,self).__init__( expr ) - # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself - if adjacent: - self.leaveWhitespace() - self.adjacent = adjacent - self.skipWhitespace = True - self.joinString = joinString - self.callPreparse = True - - def ignore( self, other ): - if self.adjacent: - ParserElement.ignore(self, other) - else: - super( Combine, self).ignore( other ) - return self - - def postParse( self, instring, loc, tokenlist ): - retToks = tokenlist.copy() - del retToks[:] - retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults) - - if self.resultsName and len(retToks.keys())>0: - return [ retToks ] - else: - return retToks - -class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for returning tokens of C{ZeroOrMore} and C{OneOrMore} expressions.""" - def __init__( self, expr ): - super(Group,self).__init__( expr ) - self.saveAsList = True - - def postParse( self, instring, loc, tokenlist ): - return [ tokenlist ] - -class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also as a dictionary. - Each element can also be referenced using the first token in the expression as its key. - Useful for tabular report scraping when the first column can be used as a item key. - """ - def __init__( self, exprs ): - super(Dict,self).__init__( exprs ) - self.saveAsList = True - - def postParse( self, instring, loc, tokenlist ): - for i,tok in enumerate(tokenlist): - if len(tok) == 0: - continue - ikey = tok[0] - if isinstance(ikey,int): - ikey = _ustr(tok[0]).strip() - if len(tok)==1: - tokenlist[ikey] = _ParseResultsWithOffset("",i) - elif len(tok)==2 and not isinstance(tok[1],ParseResults): - tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i) - else: - dictvalue = tok.copy() #ParseResults(i) - del dictvalue[0] - if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()): - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i) - else: - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i) - - if self.resultsName: - return [ tokenlist ] - else: - return tokenlist - - -class Suppress(TokenConverter): - """Converter for ignoring the results of a parsed expression.""" - def postParse( self, instring, loc, tokenlist ): - return [] - - def suppress( self ): - return self - - -class OnlyOnce(object): - """Wrapper for parse actions, to ensure they are only called once.""" - def __init__(self, methodCall): - self.callable = _trim_arity(methodCall) - self.called = False - def __call__(self,s,l,t): - if not self.called: - results = self.callable(s,l,t) - self.called = True - return results - raise ParseException(s,l,"") - def reset(self): - self.called = False - -def traceParseAction(f): - """Decorator for debugging parse actions.""" - f = _trim_arity(f) - def z(*paArgs): - thisFunc = f.func_name - s,l,t = paArgs[-3:] - if len(paArgs)>3: - thisFunc = paArgs[0].__class__.__name__ + '.' 
+ thisFunc - sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) ) - try: - ret = f(*paArgs) - except Exception: - exc = sys.exc_info()[1] - sys.stderr.write( "<", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )) - try: - if len(symbols)==len("".join(symbols)): - return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) ) - else: - return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) ) - except: - warnings.warn("Exception creating Regex for oneOf, building MatchFirst", - SyntaxWarning, stacklevel=2) - - - # last resort, just use MatchFirst - return MatchFirst( [ parseElementClass(sym) for sym in symbols ] ) - -def dictOf( key, value ): - """Helper to easily and clearly define a dictionary by specifying the respective patterns - for the key and value. Takes care of defining the C{Dict}, C{ZeroOrMore}, and C{Group} tokens - in the proper order. The key pattern can include delimiting markers or punctuation, - as long as they are suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the C{Dict} results can include named token - fields. - """ - return Dict( ZeroOrMore( Group ( key + value ) ) ) - -def originalTextFor(expr, asString=True): - """Helper to return the original, untokenized text for a given expression. Useful to - restore the parsed fields of an HTML start tag into the raw tag text itself, or to - revert separate tokens with intervening whitespace back to the original matching - input text. Simpler to use than the parse action C{L{keepOriginalText}}, and does not - require the inspect module to chase up the call stack. By default, returns a - string containing the original parsed text. - - If the optional C{asString} argument is passed as C{False}, then the return value is a - C{ParseResults} containing any results names that were originally matched, and a - single token containing the original matched text from the input string. 
So if - the expression passed to C{L{originalTextFor}} contains expressions with defined - results names, you must set C{asString} to C{False} if you want to preserve those - results name values.""" - locMarker = Empty().setParseAction(lambda s,loc,t: loc) - endlocMarker = locMarker.copy() - endlocMarker.callPreparse = False - matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") - if asString: - extractText = lambda s,l,t: s[t._original_start:t._original_end] - else: - def extractText(s,l,t): - del t[:] - t.insert(0, s[t._original_start:t._original_end]) - del t["_original_start"] - del t["_original_end"] - matchExpr.setParseAction(extractText) - return matchExpr - -# convenience constants for positional expressions -empty = Empty().setName("empty") -lineStart = LineStart().setName("lineStart") -lineEnd = LineEnd().setName("lineEnd") -stringStart = StringStart().setName("stringStart") -stringEnd = StringEnd().setName("stringEnd") - -_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1]) -_printables_less_backslash = "".join([ c for c in printables if c not in r"\]" ]) -_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],16))) -_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8))) -_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1) -_charRange = Group(_singleChar + Suppress("-") + _singleChar) -_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" - -_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p) - -def srange(s): - r"""Helper to easily define string ranges for use in Word construction. Borrows - syntax from regexp '[]' string range definitions:: - srange("[0-9]") -> "0123456789" - srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" - srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" - The input string must be enclosed in []'s, and the returned string is the expanded - character set joined into a single string. - The values enclosed in the []'s may be:: - a single character - an escaped character with a leading backslash (such as \- or \]) - an escaped hex character with a leading '\x' (\x21, which is a '!' character) - (\0x## is also supported for backwards compatibility) - an escaped octal character with a leading '\0' (\041, which is a '!' character) - a range of any of the above, separated by a dash ('a-z', etc.) - any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.) - """ - try: - return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body]) - except: - return "" - -def matchOnlyAtCol(n): - """Helper method for defining parse actions that require matching at a specific - column in the input text. - """ - def verifyCol(strg,locn,toks): - if col(locn,strg) != n: - raise ParseException(strg,locn,"matched token not at column %d" % n) - return verifyCol - -def replaceWith(replStr): - """Helper method for common parse actions that simply return a literal value. Especially - useful when used with C{transformString()}. - """ - def _replFunc(*args): - return [replStr] - return _replFunc - -def removeQuotes(s,l,t): - """Helper parse action for removing quotation marks from parsed quoted strings. 
- To use, add this parse action to quoted string using:: - quotedString.setParseAction( removeQuotes ) - """ - return t[0][1:-1] - -def upcaseTokens(s,l,t): - """Helper parse action to convert tokens to upper case.""" - return [ tt.upper() for tt in map(_ustr,t) ] - -def downcaseTokens(s,l,t): - """Helper parse action to convert tokens to lower case.""" - return [ tt.lower() for tt in map(_ustr,t) ] - -def keepOriginalText(s,startLoc,t): - """DEPRECATED - use new helper method C{originalTextFor}. - Helper parse action to preserve original parsed text, - overriding any nested parse actions.""" - try: - endloc = getTokensEndLoc() - except ParseException: - raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action") - del t[:] - t += ParseResults(s[startLoc:endloc]) - return t - -def getTokensEndLoc(): - """Method to be called from within a parse action to determine the end - location of the parsed tokens.""" - import inspect - fstack = inspect.stack() - try: - # search up the stack (through intervening argument normalizers) for correct calling routine - for f in fstack[2:]: - if f[3] == "_parseNoCache": - endloc = f[0].f_locals["loc"] - return endloc - else: - raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action") - finally: - del fstack - -def _makeTags(tagStr, xml): - """Internal helper to construct opening and closing tag expressions, given a tag name""" - if isinstance(tagStr,basestring): - resname = tagStr - tagStr = Keyword(tagStr, caseless=not xml) - else: - resname = tagStr.name - - tagAttrName = Word(alphas,alphanums+"_-:") - if (xml): - tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes ) - openTag = Suppress("<") + tagStr("tag") + \ - Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \ - Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") - else: - printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] ) - tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack) - openTag = Suppress("<") + tagStr("tag") + \ - Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \ - Optional( Suppress("=") + tagAttrValue ) ))) + \ - Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") - closeTag = Combine(_L("") - - openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr) - closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("" % tagStr) - openTag.tag = resname - closeTag.tag = resname - return openTag, closeTag - -def makeHTMLTags(tagStr): - """Helper to construct opening and closing tag expressions for HTML, given a tag name""" - return _makeTags( tagStr, False ) - -def makeXMLTags(tagStr): - """Helper to construct opening and closing tag expressions for XML, given a tag name""" - return _makeTags( tagStr, True ) - -def withAttribute(*args,**attrDict): - """Helper to create a validating parse action to be used with start tags created - with C{makeXMLTags} or C{makeHTMLTags}. Use C{withAttribute} to qualify a starting tag - with a required attribute value, to avoid false matches on common tags such as - C{} or C{
}. - - Call C{withAttribute} with a series of attribute names and values. Specify the list - of filter attributes names and values as: - - keyword arguments, as in C{(align="right")}, or - - as an explicit dict with C{**} operator, when an attribute name is also a Python - reserved word, as in C{**{"class":"Customer", "align":"right"}} - - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) - For attribute names with a namespace prefix, you must use the second form. Attribute - names are matched insensitive to upper/lower case. - - To verify that the attribute exists, but without specifying a value, pass - C{withAttribute.ANY_VALUE} as the value. - """ - if args: - attrs = args[:] - else: - attrs = attrDict.items() - attrs = [(k,v) for k,v in attrs] - def pa(s,l,tokens): - for attrName,attrValue in attrs: - if attrName not in tokens: - raise ParseException(s,l,"no matching attribute " + attrName) - if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % - (attrName, tokens[attrName], attrValue)) - return pa -withAttribute.ANY_VALUE = object() - -opAssoc = _Constants() -opAssoc.LEFT = object() -opAssoc.RIGHT = object() - -def operatorPrecedence( baseExpr, opList ): - """Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary or - binary, left- or right-associative. Parse actions can also be attached - to operator expressions. - - Parameters: - - baseExpr - expression representing the most basic element for the nested - - opList - list of tuples, one for each operator precedence level in the - expression grammar; each tuple is of the form - (opExpr, numTerms, rightLeftAssoc, parseAction), where: - - opExpr is the pyparsing expression for the operator; - may also be a string, which will be converted to a Literal; - if numTerms is 3, opExpr is a tuple of two expressions, for the - two operators separating the 3 terms - - numTerms is the number of terms for this operator (must - be 1, 2, or 3) - - rightLeftAssoc is the indicator whether the operator is - right or left associative, using the pyparsing-defined - constants opAssoc.RIGHT and opAssoc.LEFT. 
- - parseAction is the parse action to be associated with - expressions matching this operator expression (the - parse action tuple member may be omitted) - """ - ret = Forward() - lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') ) - for i,operDef in enumerate(opList): - opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] - if arity == 3: - if opExpr is None or len(opExpr) != 2: - raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") - opExpr1, opExpr2 = opExpr - thisExpr = Forward()#.setName("expr%d" % i) - if rightLeftAssoc == opAssoc.LEFT: - if arity == 1: - matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) - elif arity == 2: - if opExpr is not None: - matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) - else: - matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) - elif arity == 3: - matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ - Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - elif rightLeftAssoc == opAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Optional): - opExpr = Optional(opExpr) - matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) - elif arity == 2: - if opExpr is not None: - matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) - else: - matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) - elif arity == 3: - matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ - Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - else: - raise ValueError("operator must indicate right or left associativity") - if pa: - matchExpr.setParseAction( pa ) - thisExpr << ( matchExpr | lastExpr ) - lastExpr = thisExpr - ret << lastExpr - return ret - -dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes") -sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes") -quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes") -unicodeString = Combine(_L('u') + quotedString.copy()) - -def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): - """Helper method for defining nested lists enclosed in opening and closing - delimiters ("(" and ")" are the default). - - Parameters: - - opener - opening character for a nested list (default="("); can also be a pyparsing expression - - closer - closing character for a nested list (default=")"); can also be a pyparsing expression - - content - expression for items within the nested lists (default=None) - - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString) - - If an expression is not provided for the content argument, the nested - expression will capture all whitespace-delimited content between delimiters - as a list of separate values. 
- - Use the C{ignoreExpr} argument to define expressions that may contain - opening or closing characters that should not be treated as opening - or closing characters for nesting, such as quotedString or a comment - expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. - The default is L{quotedString}, but if no expressions are to be ignored, - then pass C{None} for this argument. - """ - if opener == closer: - raise ValueError("opening and closing strings cannot be the same") - if content is None: - if isinstance(opener,basestring) and isinstance(closer,basestring): - if len(opener) == 1 and len(closer)==1: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr + - CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) - else: - content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS - ).setParseAction(lambda t:t[0].strip())) - else: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr + - ~Literal(opener) + ~Literal(closer) + - CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) - else: - content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) + - CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) - else: - raise ValueError("opening and closing arguments must be strings if no content expression is given") - ret = Forward() - if ignoreExpr is not None: - ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) - else: - ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) - return ret - -def indentedBlock(blockStatementExpr, indentStack, indent=True): - """Helper method for defining space-delimited indentation blocks, such as - those used to define block statements in Python source code. - - Parameters: - - blockStatementExpr - expression defining syntax of statement that - is repeated within the indented block - - indentStack - list created by caller to manage indentation stack - (multiple statementWithIndentedBlock expressions within a single grammar - should share a common indentStack) - - indent - boolean indicating whether block must be indented beyond the - the current level; set to False for block of left-most statements - (default=True) - - A valid block must contain at least one C{blockStatement}. 
- """ - def checkPeerIndent(s,l,t): - if l >= len(s): return - curCol = col(l,s) - if curCol != indentStack[-1]: - if curCol > indentStack[-1]: - raise ParseFatalException(s,l,"illegal nesting") - raise ParseException(s,l,"not a peer entry") - - def checkSubIndent(s,l,t): - curCol = col(l,s) - if curCol > indentStack[-1]: - indentStack.append( curCol ) - else: - raise ParseException(s,l,"not a subentry") - - def checkUnindent(s,l,t): - if l >= len(s): return - curCol = col(l,s) - if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): - raise ParseException(s,l,"not an unindent") - indentStack.pop() - - NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) - INDENT = Empty() + Empty().setParseAction(checkSubIndent) - PEER = Empty().setParseAction(checkPeerIndent) - UNDENT = Empty().setParseAction(checkUnindent) - if indent: - smExpr = Group( Optional(NL) + - #~ FollowedBy(blockStatementExpr) + - INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) - else: - smExpr = Group( Optional(NL) + - (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) - blockStatementExpr.ignore(_bslash + LineEnd()) - return smExpr - -alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:")) -commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline() -_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "')) -replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None - -# it's easy to get these comment structures wrong - they're very common, so may as well make them available -cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment") - -htmlComment = Regex(r"") -restOfLine = Regex(r".*").leaveWhitespace() -dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment") -cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?" 
+ str(tokenlist)) - print ("tokens = " + str(tokens)) - print ("tokens.columns = " + str(tokens.columns)) - print ("tokens.tables = " + str(tokens.tables)) - print (tokens.asXML("SQL",True)) - except ParseBaseException: - err = sys.exc_info()[1] - print (teststring + "->") - print (err.line) - print (" "*(err.column-1) + "^") - print (err) - print() - - selectToken = CaselessLiteral( "select" ) - fromToken = CaselessLiteral( "from" ) - - ident = Word( alphas, alphanums + "_$" ) - columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens ) - columnNameList = Group( delimitedList( columnName ) )#.setName("columns") - tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens ) - tableNameList = Group( delimitedList( tableName ) )#.setName("tables") - simpleSQL = ( selectToken + \ - ( '*' | columnNameList ).setResultsName( "columns" ) + \ - fromToken + \ - tableNameList.setResultsName( "tables" ) ) - - test( "SELECT * from XYZZY, ABC" ) - test( "select * from SYS.XYZZY" ) - test( "Select A from Sys.dual" ) - test( "Select AA,BB,CC from Sys.dual" ) - test( "Select A, B, C from Sys.dual" ) - test( "Select A, B, C from Sys.dual" ) - test( "Xelect A, B, C from Sys.dual" ) - test( "Select A, B, C frox Sys.dual" ) - test( "Select" ) - test( "Select ^^^ frox Sys.dual" ) - test( "Select A, B, C from Sys.dual, Table2 " ) diff --git a/scripts/xml2aloe/__init__.py b/scripts/xml2aloe/__init__.py deleted file mode 100644 index 1a26a8b67..000000000 --- a/scripts/xml2aloe/__init__.py +++ /dev/null @@ -1,117 +0,0 @@ -# -# Copyright 2012-2013 The libLTE Developers. See the -# COPYRIGHT file at the top-level directory of this distribution. -# -# This file is part of the libLTE library. -# -# libLTE is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as -# published by the Free Software Foundation, either version 3 of -# the License, or (at your option) any later version. -# -# libLTE is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# A copy of the GNU Lesser General Public License can be found in -# the LICENSE file in the top-level directory of this distribution -# and at http://www.gnu.org/licenses/. 
-# - - - -import shutil -import os - -def MakeModule(m): - dir = os.path.dirname(__file__) - - tdir = os.path.join(dir, 'template') - ddir = tdir + m.directory_name - print 'Creating new directory ' + ddir - - shutil.copytree(tdir,ddir) - - WriteAloeC(m,tdir + '/src/template.c',\ - ddir + '/src/' + m.name + '.c') - WriteAloeH(m,tdir + '/src/template.h',\ - ddir + '/src/' + m.name + '.h') - - -def WriteInitParamsGet(m, f): - for p in m.init_params: - f.write('\tif (param_get_' + p['type'] + '_name("' + p['name'] + '", &' + p['variable']+ ')) {\n' - '\t\t' + p['variable'] + ' = ' + str(p['default']) + ';\n'\ - '\t}\n') - -def WriteInputParamsHandlers(m, f): - for p in m.input_params: - f.write('pmid_t ' + p['name'] + '_id;\n') - -def WriteInputParamsId(m, f): - for p in m.input_params: - f.write('\t' + p['name'] + '_id = param_id("' + p['name'] + '");\n') - -def WriteInputParamsGet(m, f): - for p in m.input_params: - f.write('\tif (param_get_' + p['type'] + '(' + p['name'] + '_id, &' + p['variable'] + ') != 1) {\n') - if p['default'] == None: - f.write('\t\t' + r'moderror("Error getting parameter ' + p['name'] + '\\n");' + '\n') - f.write('\t\treturn -1;\n\t}\n') - else: - f.write('\t\t' + r'modinfo("Parameter ' + p['name'] + ' not found. Setting to default (' + p['default'] + ')\\n");' + '\n') - f.write('\t\t' + p['variable'] + ' = ' + p['default'] + ';\n\t}\n') - - -def WriteAloeC(m, input, output): - with open(input,'r') as f: - newlines = [] - for line in f.readlines(): - newlines.append(line.replace('-name-', m.name)) - with open(output, 'w') as f: - for line in newlines: - - if '--input_parameters_handlers--' in line: - WriteInputParamsHandlers(m, f) - elif '--input_parameters_getid--' in line: - WriteInputParamsId(m, f) - elif '--init_parameters--' in line: - WriteInitParamsGet(m, f) - elif '--input_parameters--' in line: - WriteInputParamsGet(m, f) - else: - f.write(line) - -def WriteAloeH(m, input, output): - with open(input,'r') as f: - newlines = [] - for line in f.readlines(): - if '-typeinput-' in line: - if m.nof_inputs > 0: - newlines.append(line.replace('-typeinput-', m.input_type)) - elif '-numinputs-' in line: - newlines.append(line.replace('-numinputs-', str(m.nof_inputs))) - elif '-sizeinput-' in line: - if m.nof_inputs > 0: - newlines.append(line.replace('-sizeinput-', m.input_size)) - else: - newlines.append(line.replace('-sizeinput-', '0')) - elif '-typeoutput-' in line: - if m.nof_outputs > 0: - newlines.append(line.replace('-typeoutput-', m.output_type)) - elif '-numoutputs-' in line: - newlines.append(line.replace('-numoutputs-', str(m.nof_outputs))) - elif '-sizeoutput-' in line: - if m.nof_outputs > 0: - newlines.append(line.replace('-sizeoutput-', m.output_size)) - else: - newlines.append(line.replace('-sizeoutput-', '0')) - else: - newlines.append(line) - - with open(output, 'w') as f: - for line in newlines: - f.write(line) - - \ No newline at end of file diff --git a/scripts/xml2aloe/__init__.pyc b/scripts/xml2aloe/__init__.pyc deleted file mode 100644 index cf1f75581e47657b1296104cf062513950c9a2ec..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4093 zcmcIn&vO)25Pmb+Y<8A}1R|^m%CH*TD9aKKdO#ElEZ{&&hqACxYF&0FWRl&PW!{Sf zB?pZ+t>R=+mi_^H_Tli%oNl;oWN0= zuG(vE#epjj2@7~Z%~KT|xUxA0k@7&K z_>jni6@fXf)=D_rBG$tRqi(O$GLf#}<9rsiOq|^Eljx2g##z)hX?EAw)8I99)@}@> zPqsDhDRY|E#>B%y%a*lV+XD$hu~Zw{sMj*fW&#BENLt-UPSR{H#hs|x6p58IPRw2_ zVNcN%!6K&g@|uY|qSH=$cTE;WxVKW*ve-m#cGBp)EFtnE^OtJe+|W~9I7GG=QG;^` z5rU|Jp=8jnf@&8V2iU?nUA1&(rVm(?{v0$sBxzVqTG 
zzt(GIt#0#HWUL#^-Cm^WD6xL68t_({m#LockBbXFYGrZjX2*)n&- zeK9h#nALQHa%P<3mS^&qUI1(TrQ9<|AL*G98`EBkV#Z!K4x7feX5^~PI6PCX5dnrb zjg#J*Xgy8uuw)i(h)wKllaO2ST*=|kX&n`wv~pt%{&g1!Dq zZibg1X@(rENL3>y;Zlp5e)X&|Wd;PcE9KSr)96QA;O+!YAOS|SBS%#6>ZW0oWocHczQvc1#3S`bFc6t2^CfM3kW}Z-2wKXTK=HgRGHY4l z*B2zCXs(O3;$$hURRzjc*~h$}q{d%L*OKt0f3?59N&V2KCBG&CKX13_=dEqIY72Q% zBo@;odV+DuyKIk3t^ze> zqoCGLv9|!OHd$DzYGYj8E3iMgH?CG}>w$)%z;;7WHK(qzOuX|R7X${S5xM^r7hL~> zsuyl-auI+NE36h(_N785XPaVsmfI_Aruw|^6k3{59=@&K&kDt2l!tH2`$M79jPQ5~ zV0YY}a%FHz9Cgs!Ftudj+BzG7x`6bKW+d0^Lur;c(aq(4-mBL&v&;?>KxQ0*P4AG6 zGM|G~WB#cf)#~*cBS2u2_M*hlOK7sfPMky<)=;X|!+a=}GK5e`ho z!BGxU5vXL#B9o|GMHWie3{sYq2`S9z;p{FfKcsf2ZUnr-2|W5a3}|OiC}*AHcu%>L zfaTER=HR3e1e^r~{|GpLbhg0xy)y*PZye%EkV#xE6951zAh=pk*-s8}J4Ofqt`w90 zd4SuWoZE9lp!`};o=X?{IK*;5`9KE>Ia2=`-R;*=eE~RH^MU5J!{FG(oQG9&9G`G_ zO8j)#@eTripn7Xn-T}b3L2zUA#-+g>O55%rVL^-iFuL1=QGJNvEajoyMhuFwGkk}f zw_{e2RqoG8Ryb_4!mOkgMCqP9O8<2FxkRE=`p&=yE;PwHJ59lIJ;rB$GFj`&oUU6T z9e>;xl5jqy*)xf%1%KU6K+S$bPb8?yhmo-ii?1DRpYQW-a^U+p%lFY=U5OA}ta|`vqenOS7 d-fJl9`2F-cx0l*K=}fxbv{&>B__xQK{ST0)IoSXJ diff --git a/scripts/xml2aloe/template/CMakeLists.txt b/scripts/xml2aloe/template/CMakeLists.txt deleted file mode 100644 index 48fbe4769..000000000 --- a/scripts/xml2aloe/template/CMakeLists.txt +++ /dev/null @@ -1,94 +0,0 @@ -# This configuration is for the aloe++ skeleton - -# set-up the program libraries here -set(LIBRARIES m rt) - -# set-up program includes here -include_directories(/usr/local/include/) - -############## DO NOT NEED TO MODIFY BEYOND HERE - -get_filename_component(module ${CMAKE_CURRENT_SOURCE_DIR} NAME) - -if( CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR ) - cmake_minimum_required (VERSION 2.6) - project (${module}) - - # The version number. - set (OECORE_VERSION_MAJOR 1) - set (OECORE_VERSION_MINOR 0) - set(MODULE_REPOS_NAME "default") - -else() - include_directories(${OESR_INCLUDE}) -endif() - - -file(GLOB_RECURSE SOURCES "src/*.c") -file(GLOB_RECURSE TEST_SOURCES "test/*.c") - -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/src) - -# aloe module -add_library(${module}-aloe SHARED ${SOURCES}) -set_target_properties(${module}-aloe PROPERTIES OUTPUT_NAME ${module}) -set_target_properties(${module}-aloe PROPERTIES COMPILE_FLAGS "-D_COMPILE_ALOE") -target_link_libraries(${module}-aloe oesrapi skeleton ${LIBRARIES}) -install(TARGETS ${module}-aloe DESTINATION lib/${MODULE_REPOS_NAME}/) - - -if (NOT ${TEST_SOURCES} STREQUAL "") - # standalone program for testing - add_executable(${module}-bin ${SOURCES} ${TEST_SOURCES}) - set_target_properties(${module}-bin PROPERTIES OUTPUT_NAME ${module}) - set_target_properties(${module}-bin PROPERTIES COMPILE_FLAGS "-D_COMPILE_STANDALONE") - target_link_libraries(${module}-bin standalone ${LIBRARIES}) - install(TARGETS ${module}-bin DESTINATION bin) -endif() - -# octave mex file -set(install_mex "") -if(NOT $ENV{OCTAVE_INCLUDE} STREQUAL "") - if(NOT $ENV{OCTAVE_LIBS} STREQUAL "") - - add_library(${module}-oct SHARED ${SOURCES}) - set_target_properties(${module}-oct PROPERTIES OUTPUT_NAME ${module}) - set_target_properties(${module}-oct PROPERTIES PREFIX "am_") - set_target_properties(${module}-oct PROPERTIES SUFFIX .mex) - - set_target_properties(${module}-oct PROPERTIES COMPILE_FLAGS "-I$ENV{OCTAVE_INCLUDE} -D_COMPILE_MEX -Wl,-Bsymbolic -L$ENV{OCTAVE_LIBS} -loctinterp -loctave -lcruft -Wl,-Bsymbolic-functions -Wl,-z,relro") - target_link_libraries(${module}-oct aloe_octave ${LIBRARIES}) - install(TARGETS ${module}-oct DESTINATION mex) - - endif() -endif() - -#matlab mex 
-if(NOT $ENV{MATLAB_ROOT} STREQUAL "") - add_library(${module}-mat SHARED ${SOURCES}) - set_target_properties(${module}-mat PROPERTIES OUTPUT_NAME ${module}) - set_target_properties(${module}-mat PROPERTIES PREFIX "am_") - - if(CMAKE_SIZEOF_VOID_P EQUAL 8) - set_target_properties(${module}-mat PROPERTIES SUFFIX .mexa64) - set_target_properties(${module}-mat PROPERTIES COMPILE_FLAGS "-I$ENV{MATLAB_ROOT} -O -pthread -shared -Wl,--version-script,$ENV{MATLAB_ROOT}/extern/lib/glnxa64/mexFunction.map -Wl,--no-undefined -Wl,-rpath-link,$ENV{MATLAB_ROOT}/bin/glnxa64 -L$ENV{MATLAB_ROOT}/bin/glnxa64 -lmx -lmex -lmat -lm -lstdc++") - else() - set_target_properties(${module}-mat PROPERTIES SUFFIX .mexglx) - set_target_properties(${module}-mat PROPERTIES COMPILE_FLAGS "-I$ENV{MATLAB_ROOT} -O -pthread -shared -m32 -Wl,--version-script,$ENV{MATLAB_ROOT}/extern/lib/glnx86/mexFunction.map -Wl,--no-undefined -Wl,-rpath-link,$ENV{MATLAB_ROOT}/bin/glnx86 -L$ENV{MATLAB_ROOT}/bin/glnx86 -lmx -lmex -lmat -lm -lstdc++") - endif() - - target_link_libraries(${module}-mat aloe_matlab ${LIBRARIES}) - install(TARGETS ${module}-mat DESTINATION mex) - -endif() - - - - - - - - - - - diff --git a/scripts/xml2aloe/template/src/template.c b/scripts/xml2aloe/template/src/template.c deleted file mode 100644 index 41bc2dbbc..000000000 --- a/scripts/xml2aloe/template/src/template.c +++ /dev/null @@ -1,100 +0,0 @@ -/** - * - * \section COPYRIGHT - * - * Copyright 2013-2015 The srsLTE Developers. See the - * COPYRIGHT file at the top-level directory of this distribution. - * - * \section LICENSE - * - * This file is part of the srsLTE library. - * - * srsLTE is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of - * the License, or (at your option) any later version. - * - * srsLTE is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * A copy of the GNU Affero General Public License can be found in - * the LICENSE file in the top-level directory of this distribution - * and at http://www.gnu.org/licenses/. - * - */ - -/* - * This file has been automatically generated from -name- - */ - -#include -#include -#include -#include - -#include "-name-.h" - --name-_hl -name-; - ---input_parameters_handlers-- - -int out_len[NOF_OUTPUT_ITF]; - -int initialize() { - - /* Initialization Parameters */ - --init_parameters-- - - /* Input Control Parameters */ - --input_parameters_getid-- - - /* Initialization function */ - return -name-_initialize(&-name-); -} - - -int work(void **inp, void **out) { - int i,n; -#if NOF_INPUTS>1 - for (i=0;i1 - for (i=0;i - -typedef -typeinput- input_t; -typedef -typeoutput- output_t; - -#define INPUT_MAX_SAMPLES -sizeinput- -#define OUTPUT_MAX_SAMPLES -sizeoutput- - -#define NOF_INPUT_ITF -numinputs- -#define NOF_OUTPUT_ITF -numoutputs- - -#endif -/**@} */ - -#ifndef INCLUDE_DEFS_ONLY - -/* Input and output buffer sizes (in number of samples) */ -const int input_max_samples = INPUT_MAX_SAMPLES; -const int output_max_samples = OUTPUT_MAX_SAMPLES; - -/* leave these two lines unmodified */ -const int input_sample_sz = sizeof(input_t); -int output_sample_sz = sizeof(output_t); - -/* Number of I/O interfaces. 
All have the same maximum size */ -const int nof_input_itf = NOF_INPUT_ITF; -const int nof_output_itf = NOF_OUTPUT_ITF; - -#endif diff --git a/scripts/xml2aloe/template/test/test_generate.c b/scripts/xml2aloe/template/test/test_generate.c deleted file mode 100644 index e794eb68c..000000000 --- a/scripts/xml2aloe/template/test/test_generate.c +++ /dev/null @@ -1,83 +0,0 @@ -/** - * - * \section COPYRIGHT - * - * Copyright 2013-2015 The srsLTE Developers. See the - * COPYRIGHT file at the top-level directory of this distribution. - * - * \section LICENSE - * - * This file is part of the srsLTE library. - * - * srsLTE is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as - * published by the Free Software Foundation, either version 3 of - * the License, or (at your option) any later version. - * - * srsLTE is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * A copy of the GNU Affero General Public License can be found in - * the LICENSE file in the top-level directory of this distribution - * and at http://www.gnu.org/licenses/. - * - */ - - -/* Functions that generate the test data fed into the DSP modules being developed */ -#include -#include -#include - -#include -#include - -#define INCLUDE_DEFS_ONLY -#include "template.h" - -int offset=0; - -/** - * Generates input signal. VERY IMPORTANT to fill length vector with the number of - * samples that have been generated. - * @param inp Input interface buffers. Data from other interfaces is stacked in the buffer. - * Use in(ptr,idx) to access the address. 
- * - * @param lengths Save on n-th position the number of samples generated for the n-th interface - */ -int generate_input_signal(void *in, int *lengths) -{ - int i; - input_t *input = in; - int block_length; - pmid_t blen_id; - - blen_id = param_id("block_length"); - if (!blen_id) { - moderror("Parameter block_length not found\n"); - return -1; - } - if (!param_get_int(blen_id,&block_length)) { - moderror("Getting integer parameter block_length\n"); - return -1; - } - - modinfo_msg("Parameter block_length is %d\n",block_length); - - - /** HERE INDICATE THE LENGTH OF THE SIGNAL */ - lengths[0] = block_length; - - for (i=0;iinput,hl->output,hl->ctrl_in.variance,hl->in_len); - hl->out_len = hl->in_len; - return 0; -} - -int srslte_ch_awgn_stop(srslte_ch_awgn_hl* hl) { - return 0; -} diff --git a/srslte/lib/cuhd/src/cuhd_imp.cpp b/srslte/lib/cuhd/src/cuhd_imp.cpp index ece033074..436dae607 100644 --- a/srslte/lib/cuhd/src/cuhd_imp.cpp +++ b/srslte/lib/cuhd/src/cuhd_imp.cpp @@ -370,7 +370,7 @@ double cuhd_set_tx_gain(void *h, double gain) { cuhd_handler *handler = static_cast < cuhd_handler * >(h); handler->usrp->set_tx_gain(gain); - return handler->usrp->get_tx_gain(); + return gain; } double cuhd_set_tx_srate(void *h, double freq) diff --git a/srslte/lib/fec/src/convcoder.c b/srslte/lib/fec/src/convcoder.c index c8f4e74b6..7dc38673e 100644 --- a/srslte/lib/fec/src/convcoder.c +++ b/srslte/lib/fec/src/convcoder.c @@ -65,24 +65,3 @@ int srslte_convcoder_encode(srslte_convcoder_t *q, uint8_t *input, uint8_t *outp } } - - -int srslte_convcoder_initialize(srslte_convcoder_hl* h) { - return 0; -} - -int srslte_convcoder_work(srslte_convcoder_hl* hl) { - - hl->obj.K = hl->ctrl_in.constraint_length; - hl->obj.R = hl->ctrl_in.rate; - hl->obj.poly[0] = hl->ctrl_in.generator_0; - hl->obj.poly[1] = hl->ctrl_in.generator_1; - hl->obj.poly[2] = hl->ctrl_in.generator_2; - hl->obj.tail_biting = hl->ctrl_in.tail_bitting?true:false; - hl->out_len = srslte_convcoder_encode(&hl->obj, hl->input, hl->output, hl->in_len); - return 0; -} - -int srslte_convcoder_stop(srslte_convcoder_hl* h) { - return 0; -} diff --git a/srslte/lib/fec/src/rm_conv.c b/srslte/lib/fec/src/rm_conv.c index 5f1a8cea6..04efb3087 100644 --- a/srslte/lib/fec/src/rm_conv.c +++ b/srslte/lib/fec/src/rm_conv.c @@ -153,26 +153,3 @@ int srslte_rm_conv_rx(float *input, uint32_t in_len, float *output, uint32_t out return 0; } -/** High-level API */ - -int srslte_rm_conv_initialize(srslte_rm_conv_hl* h) { - - return 0; -} - -/** This function can be called in a subframe (1ms) basis */ -int srslte_rm_conv_work(srslte_rm_conv_hl* hl) { - if (hl->init.direction) { - srslte_rm_conv_tx(hl->input, hl->in_len, hl->output, hl->ctrl_in.E); - hl->out_len = hl->ctrl_in.E; - } else { - srslte_rm_conv_rx(hl->input, hl->in_len, hl->output, hl->ctrl_in.S); - hl->out_len = hl->ctrl_in.S; - } - return 0; -} - -int srslte_rm_conv_stop(srslte_rm_conv_hl* hl) { - return 0; -} - diff --git a/srslte/lib/fec/src/rm_turbo.c b/srslte/lib/fec/src/rm_turbo.c index 6502b64fd..f4fa30f95 100644 --- a/srslte/lib/fec/src/rm_turbo.c +++ b/srslte/lib/fec/src/rm_turbo.c @@ -716,20 +716,3 @@ int srslte_rm_turbo_rx(float *w_buff, uint32_t w_buff_len, float *input, uint32_ return 0; } - - -/** High-level API */ - -int srslte_rm_turbo_initialize(srslte_rm_turbo_hl* h) { - return 0; -} - -/** This function can be called in a subframe (1ms) basis */ -int srslte_rm_turbo_work(srslte_rm_turbo_hl* hl) { - return 0; -} - -int srslte_rm_turbo_stop(srslte_rm_turbo_hl* hl) { - return 0; -} - 
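The hunks above strip the high-level '_hl' wrapper API (the *_initialize/*_work/*_stop entry points) from the FEC modules, leaving only the low-level functions. For reference, a caller that previously went through the removed srslte_convcoder wrapper can drive the encoder directly; the sketch below is illustrative only and mirrors the wrapper body deleted above (the include path, the struct field types and the frame-length argument type are assumptions, not confirmed by this patch):

    #include <stdint.h>
    #include <stdbool.h>
    #include "srslte/fec/convcoder.h"  /* assumed install path of the convcoder header */

    /* Encode one block with the low-level API, performing the same set-up the
     * removed srslte_convcoder_work() wrapper used to do. */
    int encode_block(uint8_t *input, uint8_t *output, int in_len,
                     uint32_t constraint_length, uint32_t rate,
                     int g0, int g1, int g2, bool tail_biting)
    {
      srslte_convcoder_t q;
      q.K = constraint_length;  /* same assignments as the deleted wrapper */
      q.R = rate;
      q.poly[0] = g0;
      q.poly[1] = g1;
      q.poly[2] = g2;
      q.tail_biting = tail_biting;
      /* returns the number of encoded output bits written to output */
      return srslte_convcoder_encode(&q, input, output, in_len);
    }
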
diff --git a/srslte/lib/fec/src/viterbi.c b/srslte/lib/fec/src/viterbi.c index 579a74f1f..d60a913d1 100644 --- a/srslte/lib/fec/src/viterbi.c +++ b/srslte/lib/fec/src/viterbi.c @@ -231,52 +231,3 @@ int srslte_viterbi_decode_uc(srslte_viterbi_t *q, uint8_t *symbols, uint8_t *dat uint32_t frame_length) { return q->decode(q, symbols, data, frame_length); } - -int srslte_viterbi_initialize(srslte_viterbi_hl* h) { - uint32_t poly[3]; - srslte_viterbi_type_t type; - if (h->init.rate == 2) { - if (h->init.constraint_length == 7) { - type = SRSLTE_VITERBI_27; - } else if (h->init.constraint_length == 9) { - type = SRSLTE_VITERBI_29; - } else { - fprintf(stderr, "Unsupported decoder %d/%d\n", h->init.rate, - h->init.constraint_length); - return -1; - } - } else if (h->init.rate == 3) { - if (h->init.constraint_length == 7) { - type = SRSLTE_VITERBI_37; - } else if (h->init.constraint_length == 9) { - type = SRSLTE_VITERBI_39; - } else { - fprintf(stderr, "Unsupported decoder %d/%d\n", h->init.rate, - h->init.constraint_length); - return -1; - } - } else { - fprintf(stderr, "Unsupported decoder %d/%d\n", h->init.rate, - h->init.constraint_length); - return -1; - } - poly[0] = h->init.generator_0; - poly[1] = h->init.generator_1; - poly[2] = h->init.generator_2; - return srslte_viterbi_init(&h->obj, type, poly, (uint32_t) h->init.frame_length, - h->init.tail_bitting ? true : false); -} - -int srslte_viterbi_work(srslte_viterbi_hl* hl) { - if (hl->in_len != hl->init.frame_length) { - fprintf(stderr, "Expected input length %d but got %d\n", - hl->init.frame_length, hl->in_len); - return -1; - } - return srslte_viterbi_decode_f(&hl->obj, hl->input, hl->output, hl->init.frame_length); -} - -int srslte_viterbi_stop(srslte_viterbi_hl* h) { - srslte_viterbi_free(&h->obj); - return 0; -} diff --git a/srslte/lib/io/src/binsource.c b/srslte/lib/io/src/binsource.c index 398d48140..3364528ba 100644 --- a/srslte/lib/io/src/binsource.c +++ b/srslte/lib/io/src/binsource.c @@ -134,47 +134,3 @@ int srslte_binsource_generate(srslte_binsource_t* q, uint8_t *bits, int nbits) { - - -/* High-Level API */ -int srslte_binsource_initialize(srslte_binsource_hl* hl) { - - srslte_binsource_init(&hl->obj); - if (hl->init.seed) { - srslte_binsource_seed_set(&hl->obj,hl->init.seed); - } else { - srslte_binsource_seed_time(&hl->obj); - } - - if (hl->init.cache_seq_nbits) { - if (srslte_binsource_cache_gen(&hl->obj,hl->init.cache_seq_nbits)) { - return -1; - } - } - - return 0; -} - - -int srslte_binsource_work(srslte_binsource_hl* hl) { - int ret = -1; - - if (hl->init.cache_seq_nbits) { - srslte_binsource_cache_cpy(&hl->obj,hl->output,hl->ctrl_in.nbits); - ret = 0; - } else { - ret = srslte_binsource_generate(&hl->obj,hl->output,hl->ctrl_in.nbits); - } - if (!ret) { - hl->out_len = hl->ctrl_in.nbits; - } else { - hl->out_len = 0; - } - return ret; -} - -int srslte_binsource_stop(srslte_binsource_hl* hl) { - srslte_binsource_free(&hl->obj); - return 0; -} - diff --git a/srslte/lib/io/src/filesink.c b/srslte/lib/io/src/filesink.c index ef72f9fc8..29ed0412b 100644 --- a/srslte/lib/io/src/filesink.c +++ b/srslte/lib/io/src/filesink.c @@ -100,20 +100,3 @@ int srslte_filesink_write(srslte_filesink_t *q, void *buffer, int nsamples) { return i; } - - -int srslte_filesink_initialize(srslte_filesink_hl* h) { - return srslte_filesink_init(&h->obj, h->init.file_name, h->init.data_type); -} - -int srslte_filesink_work(srslte_filesink_hl* h) { - if (srslte_filesink_write(&h->obj, h->input, h->in_len)<0) { - return -1; - } - return 0; -} - -int 
srslte_filesink_stop(srslte_filesink_hl* h) { - srslte_filesink_free(&h->obj); - return 0; -} diff --git a/srslte/lib/io/src/filesource.c b/srslte/lib/io/src/filesource.c index 5343fd3e2..08efefb81 100644 --- a/srslte/lib/io/src/filesource.c +++ b/srslte/lib/io/src/filesource.c @@ -117,20 +117,3 @@ int srslte_filesource_read(srslte_filesource_t *q, void *buffer, int nsamples) { return i; } - -int srslte_filesource_initialize(srslte_filesource_hl* h) { - return srslte_filesource_init(&h->obj, h->init.file_name, h->init.data_type); -} - -int srslte_filesource_work(srslte_filesource_hl* h) { - h->out_len = srslte_filesource_read(&h->obj, h->output, h->ctrl_in.nsamples); - if (h->out_len < 0) { - return -1; - } - return 0; -} - -int srslte_filesource_stop(srslte_filesource_hl* h) { - srslte_filesource_free(&h->obj); - return 0; -} diff --git a/srslte/lib/io/src/netsink.c b/srslte/lib/io/src/netsink.c index 286eb8974..016b924cd 100644 --- a/srslte/lib/io/src/netsink.c +++ b/srslte/lib/io/src/netsink.c @@ -105,20 +105,3 @@ int srslte_netsink_write(srslte_netsink_t *q, void *buffer, int nof_bytes) { return n; } - - -int srslte_netsink_initialize(srslte_netsink_hl* h) { - return srslte_netsink_init(&h->obj, h->init.address, h->init.port, SRSLTE_NETSINK_UDP); -} - -int srslte_netsink_work(srslte_netsink_hl* h) { - if (srslte_netsink_write(&h->obj, h->input, h->in_len)<0) { - return -1; - } - return 0; -} - -int srslte_netsink_stop(srslte_netsink_hl* h) { - srslte_netsink_free(&h->obj); - return 0; -} diff --git a/srslte/lib/io/src/netsource.c b/srslte/lib/io/src/netsource.c index 81b1bce12..b285068f1 100644 --- a/srslte/lib/io/src/netsource.c +++ b/srslte/lib/io/src/netsource.c @@ -124,20 +124,3 @@ int srslte_netsource_set_timeout(srslte_netsource_t *q, uint32_t microseconds) { } return 0; } - -int srslte_netsource_initialize(srslte_netsource_hl* h) { - return srslte_netsource_init(&h->obj, h->init.address, h->init.port, SRSLTE_NETSOURCE_UDP); -} - -int srslte_netsource_work(srslte_netsource_hl* h) { - h->out_len = srslte_netsource_read(&h->obj, h->output, h->ctrl_in.nsamples); - if (h->out_len < 0) { - return -1; - } - return 0; -} - -int srslte_netsource_stop(srslte_netsource_hl* h) { - srslte_netsource_free(&h->obj); - return 0; -} diff --git a/srslte/lib/modem/src/demod_hard.c b/srslte/lib/modem/src/demod_hard.c index 742ec05a1..b264dbc8a 100644 --- a/srslte/lib/modem/src/demod_hard.c +++ b/srslte/lib/modem/src/demod_hard.c @@ -66,21 +66,3 @@ int srslte_demod_hard_demodulate(srslte_demod_hard_t* q, cf_t* symbols, uint8_t } -int srslte_demod_hard_initialize(srslte_demod_hard_hl* hl) { - srslte_demod_hard_init(&hl->obj); - srslte_demod_hard_table_set(&hl->obj,hl->init.std); - - return 0; -} - -int srslte_demod_hard_work(srslte_demod_hard_hl* hl) { - int ret = srslte_demod_hard_demodulate(&hl->obj,hl->input,hl->output,hl->in_len); - hl->out_len = ret; - return 0; -} - -int srslte_demod_hard_stop(srslte_demod_hard_hl* hl) { - return 0; -} - - diff --git a/srslte/lib/modem/src/demod_soft.c b/srslte/lib/modem/src/demod_soft.c index c703801da..808ee1406 100644 --- a/srslte/lib/modem/src/demod_soft.c +++ b/srslte/lib/modem/src/demod_soft.c @@ -282,19 +282,3 @@ int srslte_demod_soft_demodulate_s(srslte_mod_t modulation, const cf_t* symbols, } return 0; } - - -/* High-Level API */ -int srslte_demod_soft_initialize(srslte_demod_soft_hl* hl) { - return 0; -} - -int srslte_demod_soft_work(srslte_demod_soft_hl* hl) { - int ret = srslte_demod_soft_demodulate(hl->init.std,hl->input,hl->output,hl->in_len); - 
hl->out_len = ret; - return 0; -} - -int srslte_demod_soft_stop(srslte_demod_soft_hl* hl) { - return 0; -} diff --git a/srslte/lib/modem/src/mod.c b/srslte/lib/modem/src/mod.c index 34b4d5aff..53c927855 100644 --- a/srslte/lib/modem/src/mod.c +++ b/srslte/lib/modem/src/mod.c @@ -152,27 +152,3 @@ int srslte_mod_modulate_bytes(srslte_modem_table_t* q, uint8_t *bits, cf_t* symb } return nbits/q->nbits_x_symbol; } - - -/* High-Level API */ -int mod_initialize(srslte_mod_hl* hl) { - srslte_modem_table_init(&hl->obj); - if (srslte_modem_table_lte(&hl->obj,hl->init.std)) { - return -1; - } - - return 0; -} - -int mod_work(srslte_mod_hl* hl) { - int ret = srslte_mod_modulate(&hl->obj,hl->input,hl->output,hl->in_len); - hl->out_len = ret; - return 0; -} - -int mod_stop(srslte_mod_hl* hl) { - srslte_modem_table_free(&hl->obj); - return 0; -} - - diff --git a/srslte/lib/sync/src/sss.c b/srslte/lib/sync/src/sss.c index 14c833fa3..f4d3de370 100644 --- a/srslte/lib/sync/src/sss.c +++ b/srslte/lib/sync/src/sss.c @@ -151,30 +151,3 @@ int srslte_sss_synch_N_id_1(srslte_sss_synch_t *q, uint32_t m0, uint32_t m1) { } return N_id_1; } - -/** High-level API */ - -int srslte_sss_synch_initialize(srslte_sss_synch_hl* h) { - - if (srslte_sss_synch_init(&h->obj, 128)) { - return SRSLTE_ERROR; - } - srslte_sss_synch_set_N_id_2(&h->obj, h->init.N_id_2); - - return SRSLTE_SUCCESS; -} - -int srslte_sss_synch_work(srslte_sss_synch_hl* hl) { - - if (hl->ctrl_in.correlation_threshold) { - srslte_sss_synch_set_threshold(&hl->obj, hl->ctrl_in.correlation_threshold); - } - - return SRSLTE_SUCCESS; -} - -int srslte_sss_synch_stop(srslte_sss_synch_hl* hl) { - srslte_sss_synch_free(&hl->obj); - return SRSLTE_SUCCESS; -} - diff --git a/srslte/lib/ue/src/ue_ul.c b/srslte/lib/ue/src/ue_ul.c index a62d7dab6..a188af319 100644 --- a/srslte/lib/ue/src/ue_ul.c +++ b/srslte/lib/ue/src/ue_ul.c @@ -418,7 +418,7 @@ int srslte_ue_ul_srs_encode(srslte_ue_ul_t *q, uint32_t tti, cf_t *output_signal } if (q->normalize_en) { - float norm_factor = (float) q->cell.nof_prb/20/sqrtf(srslte_refsignal_srs_M_sc(&q->signals)/6); + float norm_factor = (float) q->cell.nof_prb/15/sqrtf(srslte_refsignal_srs_M_sc(&q->signals)); srslte_vec_sc_prod_cfc(output_signal, norm_factor, output_signal, SRSLTE_SF_LEN_PRB(q->cell.nof_prb)); }
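
The final hunk changes the SRS output normalization in srslte_ue_ul_srs_encode() from (float) nof_prb/20/sqrtf(M_sc/6) to (float) nof_prb/15/sqrtf(M_sc), still applied in place over one subframe via srslte_vec_sc_prod_cfc(). A minimal, self-contained sketch of that scaling step, using only what is visible in the hunk (a plain C99 float complex buffer stands in for cf_t, and the function and variable names are illustrative):

    #include <complex.h>
    #include <math.h>
    #include <stdint.h>

    /* Scale the generated SRS subframe by the new normalization factor,
     * mirroring the in-place srslte_vec_sc_prod_cfc() call in the hunk. */
    void normalize_srs(float complex *output_signal, int sf_len,
                       uint32_t nof_prb, uint32_t M_sc)
    {
      /* new factor introduced by this patch: nof_prb / 15 / sqrt(M_sc) */
      float norm_factor = (float) nof_prb / 15 / sqrtf((float) M_sc);
      for (int i = 0; i < sf_len; i++) {
        output_signal[i] *= norm_factor;
      }
    }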