#!/usr/bin/python
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT

import io
import os
import re
import sys
import time
import errno
import fnmatch
import fileinput
import logging
import argparse
import hashlib
import pathlib
from xml.sax.saxutils import escape, unescape, quoteattr

logging.basicConfig(format='[%(levelname)s] %(name)s: %(message)s')
logger = logging.getLogger('AzAutoGen')
logger.setLevel(logging.INFO)

# Maximum number of errors before bailing on AutoGen
MAX_ERRORS = 100
errorCount = 0

class AutoGenConfig:
    def __init__(self, targetName, cacheDir, outputDir, projectDir, inputFiles, expansionRules, dryrun, verbose, pythonPaths):
        self.targetName = targetName
        self.cacheDir = cacheDir
        self.outputDir = outputDir
        self.projectDir = projectDir
        self.inputFiles = inputFiles
        self.expansionRules = expansionRules
        self.dryrun = dryrun
        self.verbose = verbose
        self.pythonPaths = pythonPaths

def SanitizeTargetName(targetName):
    return re.sub(r'[^\w]', '', targetName.lstrip('0123456789'))
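
# Illustrative example: SanitizeTargetName("3rd.Party-Target") returns "rdPartyTarget";
# leading digits are stripped first, then any remaining non-word characters are removed.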

def ParseInputFile(inputFilePath):
    result = []
    if inputFilePath:
        with open(inputFilePath, 'r') as file:
            # input files are expected to be listed on the first line, separated by semicolons
            inputFileContent = file.readline()
            inputFiles = inputFileContent.strip().split(";")
            result = inputFiles
    return result
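
# The first (and only significant) line of the input file is a semicolon-separated list of
# file paths relative to the project directory, e.g. (hypothetical names):
#   Source/AutoGen/Foo.xml;Source/AutoGen/Bar.json;Source/AutoGen/MyTemplate.jinja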

def PrintError(*objs):
    print(*objs, file=sys.stderr)
    global errorCount
    errorCount += 1
    if errorCount > MAX_ERRORS:
        print("Maximum errors exceeded (%d), please check the tty for errors" % MAX_ERRORS, file=sys.stderr)
        sys.exit(1)

def PrintUnhandledExceptionInfo():
    print("An unexpected error occurred, please report the error you encountered and include your build output", file=sys.stderr)

def TransformEscape(string):
    return escape(quoteattr(unescape(string)))

def BooleanTrue(string):
    testString = string.lower().strip()
    return testString == "true" or testString == "1"

def CamelToHuman(string):
    return string[0].upper() + re.sub(r'((?<=[a-z])[A-Z]|(?<!\A)[A-Z](?=[a-z]))', r' \1', string[1:])
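
# Illustrative example: CamelToHuman("enableDepthOfField") returns "Enable Depth Of Field".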

def StripFloat(string):
    return re.sub(r'(\d+(\.\d*)?|\.\d+)f', r'\g<1>0', string)
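
# Illustrative example: StripFloat("0.25f") returns "0.250"; the C++-style 'f' suffix is replaced with a trailing zero.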

def CreateHashGuid(string):
    hash = hashlib.new('md5')
    hash.update(string.encode('utf-8'))
    hashStr = hash.hexdigest()
    return ("{" + hashStr[0:8] + "-" + hashStr[8:12] + "-" + hashStr[12:16] + "-" + hashStr[16:20] + "-" + hashStr[20:] + "}").upper()
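
# Produces a deterministic GUID-formatted string from the md5 of the input,
# e.g. CreateHashGuid("") returns "{D41D8CD9-8F00-B204-E980-0998ECF8427E}".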

def CreateAZHashValue64(byteData):
    hash = hashlib.new('sha256')
    hash.update(byteData)
    hashStr = hash.hexdigest()
    return ("AZ::HashValue64{ 0x" + hashStr[0:16] + " }") # grab the first 64 bits of a sha256; any 64 bits of a sha256 are just as secure as any other 64

def EtreeToString(xmlNode):
    return etree.tostring(xmlNode)

def EtreeToStringStripped(xmlNode):
    for elem in xmlNode.iter():
        if elem.text: elem.text = elem.text.strip()
        if elem.tail: elem.tail = elem.tail.strip()
    return etree.tostring(xmlNode)

def SanitizePath(path):
    return (path or '').replace('\\', '/').replace('//', '/')

def SearchPaths(filename, paths=[]):
    if len(paths) > 0:
        for path in paths:
            testFile = os.path.join(path, filename)
            if os.path.exists(testFile):
                return os.path.abspath(testFile)
    if os.path.exists(filename):
        return os.path.abspath(filename)
    return None

def ComputeOutputPath(inputFiles, projectDir, outputDir):
    commonInputPath = os.path.commonpath(inputFiles) # If we've globbed many source files, this finds the common path
    if os.path.isfile(commonInputPath): # If the commonInputPath resolves to an actual file, slice off the filename
        commonInputPath = os.path.dirname(commonInputPath)
    commonPath = os.path.commonpath([commonInputPath, projectDir]) # Finds the common path between the data source files and our project directory (//depot/dev/Code/Framework/AzCore/)
    inputRelativePath = os.path.relpath(commonInputPath, commonPath) # Computes the relative path for the project source directory (Code/Framework/AzCore/AutoGen/)
    return os.path.join(outputDir, inputRelativePath) # Returns a suitable output directory (//depot/dev/Generated/Code/Framework/AzCore/AutoGen/)

def ProcessTemplateConversion(autogenConfig, dataInputSet, dataInputFiles, templateFile, outputFile, templateCache):
    if autogenConfig.dryrun or not dataInputFiles:
        return
    try:
        outputFile = os.path.abspath(outputFile)
        outputPath = os.path.dirname(outputFile)
        treeRoots = []
        for dataInputFile in sorted(dataInputFiles):
            try:
                if dataInputFile in dataInputSet.keys():
                    treeRoots.append(dataInputSet.get(dataInputFile))
                elif os.path.splitext(dataInputFile)[1] == ".xml":
                    xml = etree.parse(dataInputFile)
                    # xml.xinclude()
                    xmlroot = xml.getroot()
                    # look for an xml schema link for this document
                    # xmlSchema = None
                    # if 'xsi' in xmlroot.nsmap:
                    #     XMLSchemaNamespace = xmlroot.nsmap['xsi']
                    #     schemaLink = xmlroot.get('{' + XMLSchemaNamespace + '}schemaLocation')
                    #     if schemaLink is None:
                    #         schemaLink = xmlroot.attrib['{' + XMLSchemaNamespace + '}noNamespaceSchemaLocation']
                    #     if schemaLink:
                    #         # if we have a schemaLink, then we need to strip off the relative pathing and use our search paths
                    #         # relative pathing on the xml file itself is purely a nicety for Visual Studio to find the correct XSD for inline validation
                    #         xmlSchema = os.path.basename(schemaLink)
                    # if xmlSchema:
                    #     # check the template directory, the template include dir, and the folder that houses the nvdef file, and the xml's location for the xsd
                    #     searchPaths = [os.path.dirname(templateFile)]
                    #     searchPaths += [os.path.dirname(dataInputFile)]
                    #     xmlShemaLoc = SearchPaths(xmlSchema, searchPaths)
                    #     try:
                    #         xmlSchemaDoc = etree.parse(xmlShemaLoc)
                    #         xmlSchemaObj = etree.XMLSchema(xmlSchemaDoc, attribute_defaults=True)
                    #         xmlSchemaObj.assertValid(xmlroot)
                    #     except etree.DocumentInvalid as e:
                    #         for error in e.error_log:
                    #             PrintError('%s(%d) : error InvalidXML %s' % (os.path.abspath(dataInputFile), error.line, error.message))
                    #     except IOError as e:
                    #         PrintError('%s(%s) : %s' % (os.path.abspath(dataInputFile), str(1), e.message))
                    xmlroot = xml.getroot()
                    dataInputSet[dataInputFile] = xml.getroot()
                    treeRoots.append(xml.getroot())
                else:
                    with open(dataInputFile) as jsonFile:
                        jsonData = json.load(jsonFile)
                        dataInputSet[dataInputFile] = jsonData
                        treeRoots.append(jsonData)
            except IOError as e:
                PrintError('%s : %s' % (os.path.abspath(dataInputFile), str(e)))
            # except etree.XMLSyntaxError as e:
            #     for error in e.error_log:
            #         PrintError('%s(%s) : error XMLSyntaxError %s' % (os.path.abspath(dataInputFile), error.line, error.message))

        compareFD = io.StringIO()
        searchPaths = [os.path.dirname(templateFile)]
        templateLoader = jinja2.FileSystemLoader(searchpath=searchPaths)
        templateEnv = jinja2.Environment(bytecode_cache=templateCache, loader=templateLoader, trim_blocks=True, extensions=["jinja2.ext.do",])
        templateEnv.filters['relpath'] = lambda x: os.path.relpath(x, outputPath)
        templateEnv.filters['dirname'] = os.path.dirname
        templateEnv.filters['basename'] = os.path.basename
        templateEnv.filters['splitext'] = os.path.splitext
        templateEnv.filters['split'] = os.path.split
        templateEnv.filters['startswith'] = str.startswith
        templateEnv.filters['int'] = int
        templateEnv.filters['str'] = str
        templateEnv.filters['escape'] = TransformEscape
        templateEnv.filters['len'] = len
        templateEnv.filters['range'] = range
        templateEnv.filters['stripFloat'] = StripFloat
        templateEnv.filters['camelToHuman'] = CamelToHuman
        templateEnv.filters['booleanTrue'] = BooleanTrue
        templateEnv.filters['createHashGuid'] = CreateHashGuid
        templateEnv.filters['createAZHashValue64'] = CreateAZHashValue64
        templateEnv.filters['etreeToString'] = EtreeToString
        templateEnv.filters['etreeToStringStripped'] = EtreeToStringStripped
        templateJinja = templateEnv.get_template(os.path.basename(templateFile))
        templateVars = {
            "autogenTargetName": autogenConfig.targetName,
            "dataFiles": treeRoots,
            "dataFileNames": dataInputFiles,
            "templateName": templateFile,
            "outputFile": outputFile,
            "filename": os.path.splitext(os.path.basename(outputFile))[0],
        }
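
        # Illustrative (hypothetical) template snippet: inside a .jinja template, the variables and
        # filters registered above are referenced with standard Jinja2 syntax, e.g.
        #   {{ autogenTargetName }}, {% for dataFile in dataFiles %} ... {% endfor %},
        #   or {{ dataFileNames[0] | basename }}.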
        try:
            outputExtension = os.path.splitext(outputFile)[1]
            if outputExtension == ".xml" or outputExtension == ".xhtml" or outputExtension == ".xsd":
                compareFD.write('<?xml version="1.0"?>\n')
                compareFD.write('<!-- Copyright (c) Contributors to the Open 3D Engine Project. -->\n')
                compareFD.write('<!-- For complete copyright and license terms please see the LICENSE at the root of this distribution. -->\n')
                compareFD.write('\n')
                compareFD.write('<!-- SPDX-License-Identifier: Apache-2.0 OR MIT -->\n')
                compareFD.write('\n')
                compareFD.write('<!-- This file is generated automatically at compile time, DO NOT EDIT BY HAND -->\n')
                compareFD.write('<!-- Template Source {0};\n     XML Sources {1} -->\n'.format(templateFile, ', '.join(dataInputFiles)))
                compareFD.write('\n')
            elif outputExtension == ".lua":
                compareFD.write('-- Copyright (c) Contributors to the Open 3D Engine Project.\n')
                compareFD.write('-- For complete copyright and license terms please see the LICENSE at the root of this distribution.\n')
                compareFD.write('\n')
                compareFD.write('-- SPDX-License-Identifier: Apache-2.0 OR MIT\n')
                compareFD.write('\n')
                compareFD.write('-- This file is generated automatically at compile time, DO NOT EDIT BY HAND\n')
                compareFD.write('-- Template Source {0};\n-- XML Sources {1}\n'.format(templateFile, ', '.join(dataInputFiles)))
                compareFD.write('\n')
            elif outputExtension == ".h" or outputExtension == ".hpp" or outputExtension == ".inl" or outputExtension == ".c" or outputExtension == ".cpp":
                compareFD.write('/*\n')
                compareFD.write(' * Copyright (c) Contributors to the Open 3D Engine Project.\n')
                compareFD.write(' * For complete copyright and license terms please see the LICENSE at the root of this distribution.\n')
                compareFD.write(' *\n')
                compareFD.write(' * SPDX-License-Identifier: Apache-2.0 OR MIT\n')
                compareFD.write(' *\n')
                compareFD.write(' * This file is generated automatically at compile time, DO NOT EDIT BY HAND\n')
                compareFD.write(' * Template Source {0};\n * Data Sources {1}\n'.format(templateFile, ', '.join(dataInputFiles)))
                compareFD.write(' */\n')
                compareFD.write('\n')
            compareFD.write(templateJinja.render(templateVars))
            compareFD.write('\n')
        except jinja2.exceptions.TemplateNotFound as e:
            PrintError('%s(1) : error TemplateNotFound %s' % (os.path.abspath(templateFile), e.message))
    except IOError as e:
        PrintError('%s : error I/O(%s) accessing %s : %s' % (os.path.abspath(templateFile), e.errno, e.filename, e.strerror))
    except jinja2.exceptions.TemplateSyntaxError as e:
        PrintError('%s(%s) : error Template processing error: %s' % (os.path.abspath(e.filename), e.lineno, e.message))
    except jinja2.exceptions.UndefinedError as e:
        # Sadly, jinja doesn't provide the exact line of the template that had this error since the template is compiled directly to python code
        PrintError('%s(1) : error Template processing error: %s with %s' % (os.path.abspath(templateFile), e.message, ', '.join([os.path.basename(dataInputFile) for dataInputFile in dataInputFiles])))

    try:
        os.makedirs(os.path.dirname(outputFile))
    except OSError as e:
        if e.errno == errno.EEXIST:
            pass
        else:
            raise

    try:
        if os.path.isfile(outputFile):
            with open(outputFile, 'r+') as currentFile:
                currentFileStringData = currentFile.read()
                if currentFileStringData == compareFD.getvalue():
                    if autogenConfig.verbose:
                        print('Generated file %s is unchanged, skipping' % (outputFile))
                else:
                    currentFile.truncate()
                    with open(outputFile, 'w+') as currentFile:
                        currentFile.write(compareFD.getvalue())
                        if autogenConfig.verbose:
                            print(f'Generated {outputFile} with template {templateFile} and inputs ' + ', '.join(dataInputFiles))
                        else:
                            print('Generated %s' % (os.path.basename(outputFile)))
        else:
            with open(outputFile, 'w+') as outputFD:
                outputFD.write(compareFD.getvalue())
                if autogenConfig.verbose:
                    print(f'Generated {outputFile} using template {templateFile} and inputs ' + ', '.join(dataInputFiles))
                else:
                    print('Generated %s' % (os.path.basename(outputFile)))
    except IOError as e:
        PrintError('%s : error I/O(%s) accessing %s : %s' % (outputFile, e.errno, e.filename, e.strerror))
    except:
        PrintError('%s : error Processing template %s' % (outputFile, templateFile))
        PrintUnhandledExceptionInfo()
        raise
    compareFD.close()

def ProcessExpansionRule(autogenConfig, sourceFiles, templateFiles, templateCache, expansionRule, dataInputSet, outputFiles):
    try:
        # should be of the format inputFile(s),templateFile,outputFile, where inputFile and outputFile are subject to wildcarding and substitutions
        expansionRuleSet = expansionRule.split(",")
        inputFiles = expansionRuleSet[0]
        templateFile = None
        outputFile = expansionRuleSet[2]
        for fullPathTemplate in templateFiles:
            if expansionRuleSet[1] in fullPathTemplate:
                templateFile = fullPathTemplate
                break
        if templateFile is None:
            print("No matching template file found for %s, template may be missing from your _files.cmake" % expansionRuleSet[1])
            return
        # We have a few potential modes of input to output mapping that we'll have to handle depending on how the user formatted their azdef expansion rule
        # (illustrative examples follow the pseudocode below):
        # if the data input file was explicit
        #     then output a single file for that explicit data
        # else the data is wildcarded
        #     if the output contains $file or $fileprefix
        #         then we can generate a *unique* name for each data input, we're in one-to-one mapping mode, create a unique output for each input
        #     else if the output contains $path
        #         then we can generate a unique name for each *directory* of data inputs, we're in many-to-one mapping mode, create a unique output for each directory
        #     else the output is explicit, not wildcarded
        #         generate a single output file containing all matching data files
        #     endif
        # endif
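        # Hypothetical examples of the three shapes an expansion rule can take:
        #   AutoGen/MyComponent.xml,MyTemplate.jinja,$path/MyComponent.generated.h  -> one explicit input, one output
        #   AutoGen/*.Nodeable.xml,MyTemplate.jinja,$path/$file.generated.h         -> one output per matching input
        #   AutoGen/*.xml,MyTemplate.jinja,CombinedOutput.generated.h               -> all matching inputs batched into one output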
        testSingle = os.path.join(autogenConfig.projectDir, inputFiles)
        if os.path.isfile(testSingle):
            # If we specified an *explicit* file to be processed (no wildcards for the data input file, foo.json not *.foo.json), this is the branch that handles this case
            # This is explicitly one-to-one mapping
            dataInputFiles = [os.path.abspath(testSingle)]
            outputFileAbsolute = outputFile.replace("$path", ComputeOutputPath(dataInputFiles, autogenConfig.projectDir, autogenConfig.outputDir))
            outputFileAbsolute = outputFileAbsolute.replace("$fileprefix", os.path.splitext(os.path.basename(testSingle))[0].split(".")[0])
            outputFileAbsolute = outputFileAbsolute.replace("$file", os.path.splitext(os.path.basename(testSingle))[0])
            outputFileAbsolute = SanitizePath(outputFileAbsolute)
            ProcessTemplateConversion(autogenConfig, dataInputSet, dataInputFiles, templateFile, outputFileAbsolute, templateCache)
            outputFiles.append(pathlib.PurePath(outputFileAbsolute))
        else:
            # We've wildcarded the data input field, so we may have to handle one-to-one mapping of data files to output, or many-to-one mapping of data files to output
            if "$fileprefix" in outputFile or "$file" in outputFile:
                # Due to the wildcards in the output file, we've determined we'll do a one-to-one mapping of data files to output
                for filename in fnmatch.filter(sourceFiles, inputFiles):
                    dataInputFiles = [os.path.abspath(filename)]
                    outputFileAbsolute = outputFile.replace("$path", ComputeOutputPath(dataInputFiles, autogenConfig.projectDir, autogenConfig.outputDir))
                    outputFileAbsolute = outputFileAbsolute.replace("$fileprefix", os.path.splitext(os.path.basename(filename))[0].split(".")[0])
                    outputFileAbsolute = outputFileAbsolute.replace("$file", os.path.splitext(os.path.basename(filename))[0])
                    outputFileAbsolute = SanitizePath(outputFileAbsolute)
                    ProcessTemplateConversion(autogenConfig, dataInputSet, dataInputFiles, templateFile, outputFileAbsolute, templateCache)
                    outputFiles.append(pathlib.PurePath(outputFileAbsolute))
            else:
                # Process all matches in one batch
                # Due to the lack of wildcards in the output file, we've determined we'll glob all matching input files into the template conversion
                dataInputFiles = [os.path.abspath(file) for file in fnmatch.filter(sourceFiles, inputFiles)]
                if "$path" in outputFile:
                    outputFileAbsolute = outputFile.replace("$path", ComputeOutputPath(dataInputFiles, autogenConfig.projectDir, autogenConfig.outputDir))
                else: # if no relative $path, put one batch file under outputDir
                    outputFileAbsolute = os.path.join(autogenConfig.outputDir, outputFile)
                outputFileAbsolute = SanitizePath(outputFileAbsolute)
                ProcessTemplateConversion(autogenConfig, dataInputSet, dataInputFiles, templateFile, outputFileAbsolute, templateCache)
                outputFiles.append(pathlib.PurePath(outputFileAbsolute))
    except IOError as e:
        PrintError('%s : error I/O(%s) accessing %s : %s' % (expansionRule, e.errno, e.filename, e.strerror))
    except:
        PrintError('%s : error Processing expansion rule' % expansionRule)
        PrintUnhandledExceptionInfo()
        raise

def ExecuteExpansionRules(autogenConfig, dataInputSet, outputFiles, pruneNonGenerated):
    # Get Globals
    global MAX_ERRORS, errorCount
    currentPath = os.getcwd()
    startTime = time.time()
    # Ensure jinja2 template cache dir actually exists...
    try:
        os.makedirs(autogenConfig.cacheDir)
    except OSError as e:
        if e.errno == errno.EEXIST:
            pass
        else:
            raise
    sourceFiles = []
    templateFiles = []
    for inputFile in autogenConfig.inputFiles:
        if inputFile.endswith(".xml") or inputFile.endswith(".json"):
            sourceFiles.append(os.path.join(autogenConfig.projectDir, inputFile))
        elif inputFile.endswith(".jinja"):
            templateFiles.append(os.path.join(autogenConfig.projectDir, inputFile))
    templateCache = jinja2.FileSystemBytecodeCache(autogenConfig.cacheDir)
    for expansionRule in autogenConfig.expansionRules:
        ProcessExpansionRule(autogenConfig, sourceFiles, templateFiles, templateCache, expansionRule, dataInputSet, outputFiles)
    if not autogenConfig.dryrun:
        if pruneNonGenerated:
            PruneNonGeneratedFiles(autogenConfig, outputFiles)
        elapsedTime = time.time() - startTime
        millis = int(round(elapsedTime * 10))
        m, s = divmod(elapsedTime, 60)
        h, m = divmod(m, 60)
        print('Total Time %d:%02d:%02d.%02d' % (h, m, s, millis))
    # Return true on success
    return errorCount == 0

def PruneNonGeneratedFiles(autogenConfig : AutoGenConfig, outputFiles : list[pathlib.PurePath]):
    '''
    Removes all files from the generated file output directories which were not generated during this invocation
    :param autogenConfig: Stores the configuration structure containing the output directory paths for generated files
    :param outputFiles: Contains the list of output files generated during the current run
    '''
    # First generate a set of output directories to iterate using the outputFiles
    generatedOutputDirs = set()
    for outputFile in outputFiles:
        generatedOutputDirs.add(pathlib.Path(outputFile.parent))

    # iterate over all the output directories where generated files are output
    # and gather a list of files that were not generated during the current invocation
    for outputDir in generatedOutputDirs:
        filesToRemove = []
        if outputDir.is_dir():
            for genFile in outputDir.iterdir():
                if genFile.is_file() and genFile not in outputFiles:
                    filesToRemove.append(genFile)
        if filesToRemove:
            logger.info(f'The following files will be pruned from the generated output directory "{outputDir}":\n'
                        f'{[str(path) for path in filesToRemove]}')
            for fileToRemove in filesToRemove:
                fileToRemove.unlink()

# Main Function
if __name__ == '__main__':
    # setup our command syntax
    parser = argparse.ArgumentParser()
    parser.add_argument("targetName", help="AzAutoGen build target name")
    parser.add_argument("cacheDir", help="location to store jinja template cache files")
    parser.add_argument("outputDir", help="location to output generated files")
    parser.add_argument("projectDir", help="project source directory that input data files and expansion rules are resolved against")
    parser.add_argument("inputFilePath", help="input file which contains autogen required files to run azcg expansion rules against")
    parser.add_argument("expansionRules", help="set of azcg expansion rules for matching data files to template files")
    parser.add_argument("-n", "--dryrun", action='store_true', help="does not execute autogen, only outputs the set of files that autogen would generate")
    parser.add_argument("-v", "--verbose", action='store_true', help="outputs verbose information about each file autogen generates or skips")
    parser.add_argument("-p", "--pythonPaths", action='append', nargs='+', default=[""], help="set of additional python paths to use for module imports")
    parser.add_argument("--prune", action='store_true', default=False,
                        help="Prunes any files in the outputDir that were not generated by the current invocation")
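
    # Illustrative invocation (hypothetical paths and expansion rule):
    #   python AzAutoGen.py MyGem.Static build/azautogen-cache build/generated /path/to/MyGem \
    #       build/MyGem_autogen_inputs.txt "AutoGen/*.xml,MyTemplate.jinja,$path/$file.generated.h"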
    args = parser.parse_args()
    autogenConfig = AutoGenConfig(SanitizeTargetName(args.targetName),
                                  os.path.abspath(SanitizePath(args.cacheDir)),
                                  os.path.abspath(SanitizePath(args.outputDir)),
                                  os.path.abspath(SanitizePath(args.projectDir)),
                                  ParseInputFile(args.inputFilePath.strip()),
                                  args.expansionRules.split(";"),
                                  args.dryrun,
                                  args.verbose,
                                  args.pythonPaths)
    # Import 3rd party modules
    for pythonPath in autogenConfig.pythonPaths:
        # argparse's action='append' combined with nargs='+' yields nested lists, so flatten as we go
        if isinstance(pythonPath, list):
            sys.path.extend(pythonPath)
        else:
            sys.path.append(pythonPath)
    import jinja2
    #from lxml import etree
    import xml.etree.ElementTree as etree # xml.etree.cElementTree is deprecated and removed in newer Python versions
    import json
    dataInputSet = {}
    outputFiles = []
    autoGenResult = ExecuteExpansionRules(autogenConfig, dataInputSet, outputFiles, args.prune)
    if autogenConfig.dryrun:
        print("%s" % ';'.join([str(path) for path in outputFiles]))
    if autoGenResult:
        sys.exit(0)
    else:
        sys.exit(1)