Check In tool source code based on Build tool project revision r1655.
git-svn-id: https://edk2.svn.sourceforge.net/svnroot/edk2/trunk/edk2@8964 6f19259b-4bc3-4df7-8a09-765794883524
1919  BaseTools/Source/Python/AutoGen/AutoGen.py  Normal file
File diff suppressed because it is too large
622  BaseTools/Source/Python/AutoGen/BuildEngine.py  Normal file
@@ -0,0 +1,622 @@
## @file
# The engine for building files
#
# Copyright (c) 2007, Intel Corporation
# All rights reserved. This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#

##
# Import Modules
#
import os
import re
import copy
import string

from Common.GlobalData import *
from Common.BuildToolError import *
from Common.Misc import tdict, PathClass
from Common.String import NormPath
from Common.DataType import *

import Common.EdkLogger as EdkLogger

## Convert file type to file list macro name
#
#   @param  FileType    The name of file type
#
#   @retval string      The name of macro
#
def FileListMacro(FileType):
    return "%sS" % FileType.replace("-", "_").upper()

## Convert file type to list file macro name
#
#   @param  FileType    The name of file type
#
#   @retval string      The name of macro
#
def ListFileMacro(FileType):
    return "%s_LIST" % FileListMacro(FileType)

class TargetDescBlock(object):
    _Cache_ = {}    # {TargetFile : TargetDescBlock object}

    # Factory method
    def __new__(Class, Inputs, Outputs, Commands, Dependencies):
        if Outputs[0] in Class._Cache_:
            Tdb = Class._Cache_[Outputs[0]]
            for File in Inputs:
                Tdb.AddInput(File)
        else:
            Tdb = super(TargetDescBlock, Class).__new__(Class)
            Tdb._Init(Inputs, Outputs, Commands, Dependencies)
            #Class._Cache_[Outputs[0]] = Tdb
        return Tdb

    def _Init(self, Inputs, Outputs, Commands, Dependencies):
        self.Inputs = Inputs
        self.Outputs = Outputs
        self.Commands = Commands
        self.Dependencies = Dependencies
        if self.Outputs:
            self.Target = self.Outputs[0]
        else:
            self.Target = None

    def __str__(self):
        return self.Target.Path

    def __hash__(self):
        return hash(self.Target.Path)

    def __eq__(self, Other):
        if type(Other) == type(self):
            return Other.Target.Path == self.Target.Path
        else:
            return str(Other) == self.Target.Path

    def AddInput(self, Input):
        if Input not in self.Inputs:
            self.Inputs.append(Input)

    def IsMultipleInput(self):
        return len(self.Inputs) > 1

    @staticmethod
    def Renew():
        TargetDescBlock._Cache_ = {}

## Class for one build rule
#
# This represents a build rule which can produce the corresponding command list
# for building the given source file(s). The result can be used to generate the
# target in a makefile.
#
class FileBuildRule:
    INC_LIST_MACRO = "INC_LIST"
    INC_MACRO = "INC"

    ## constructor
    #
    #   @param  Input       The dictionary representing input file(s) for a rule
    #   @param  Output      The list representing output file(s) for a rule
    #   @param  Command     The list containing commands to generate the output from input
    #
    def __init__(self, Type, Input, Output, Command, ExtraDependency=None):
        # The Input should not be empty
        if not Input:
            Input = []
        if not Output:
            Output = []
        if not Command:
            Command = []

        self.FileListMacro = FileListMacro(Type)
        self.ListFileMacro = ListFileMacro(Type)
        self.IncListFileMacro = self.INC_LIST_MACRO

        self.SourceFileType = Type
        # source files listed not in "*" or "?" pattern format
        if not ExtraDependency:
            self.ExtraSourceFileList = []
        else:
            self.ExtraSourceFileList = ExtraDependency

        #
        # Search macros used in command lines for <FILE_TYPE>_LIST and INC_LIST.
        # If found, generate a file to keep the input files used to get over the
        # limitation of command line length
        #
        self.MacroList = []
        self.CommandList = []
        for CmdLine in Command:
            self.MacroList.extend(gMacroPattern.findall(CmdLine))
            # replace path separator with native one
            self.CommandList.append(CmdLine)

        # Indicate what should be generated
        if self.FileListMacro in self.MacroList:
            self.GenFileListMacro = True
        else:
            self.GenFileListMacro = False

        if self.ListFileMacro in self.MacroList:
            self.GenListFile = True
            self.GenFileListMacro = True
        else:
            self.GenListFile = False

        if self.INC_LIST_MACRO in self.MacroList:
            self.GenIncListFile = True
        else:
            self.GenIncListFile = False

        # Check input files
        self.IsMultipleInput = False
        self.SourceFileExtList = []
        for File in Input:
            Base, Ext = os.path.splitext(File)
            if Base.find("*") >= 0:
                # There's "*" in the file name
                self.IsMultipleInput = True
                self.GenFileListMacro = True
            elif Base.find("?") < 0:
                # There's no "*" and "?" in file name
                self.ExtraSourceFileList.append(File)
                continue
            if Ext not in self.SourceFileExtList:
                self.SourceFileExtList.append(Ext)

        # Check output files
        self.DestFileList = []
        for File in Output:
            self.DestFileList.append(File)

        # All build targets generated by this rule for a module
        self.BuildTargets = {}

    ## str() function support
    #
    #   @retval     string
    #
    def __str__(self):
        SourceString = ""
        SourceString += " %s %s %s" % (self.SourceFileType, " ".join(self.SourceFileExtList), self.ExtraSourceFileList)
        DestString = ", ".join(self.DestFileList)
        CommandString = "\n\t".join(self.CommandList)
        return "%s : %s\n\t%s" % (DestString, SourceString, CommandString)

    ## Check if given file extension is supported by this rule
    #
    #   @param  FileExt     The extension of a file
    #
    #   @retval True        If the extension is supported
    #   @retval False       If the extension is not supported
    #
    def IsSupported(self, FileExt):
        return FileExt in self.SourceFileExtList

    def Instantiate(self, Macros={}):
        NewRuleObject = copy.copy(self)
        NewRuleObject.BuildTargets = {}
        NewRuleObject.DestFileList = []
        for File in self.DestFileList:
            NewRuleObject.DestFileList.append(PathClass(NormPath(File, Macros)))
        return NewRuleObject

    ## Apply the rule to given source file(s)
    #
    #   @param  SourceFile      One file or a list of files to be built
    #   @param  RelativeToDir   The relative path of the source file
    #   @param  PathSeparator   Path separator
    #
    #   @retval tuple           (Source file in full path, List of individual source files, Destination file, List of build commands)
    #
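    # Illustrative note (added by the editor, not part of the original source): the
    # ${...} placeholders in a rule's command and output templates are expanded with
    # string.Template using the BuildRulePlaceholderDict built below. For example, a
    # hypothetical command template
    #     "$(SOMETOOL)" -o ${dst} ${src}
    # would have ${src} replaced with the source file path and ${dst} with the path
    # of the first destination file.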
    def Apply(self, SourceFile):
        if not self.CommandList or not self.DestFileList:
            return None

        # source file
        if self.IsMultipleInput:
            SrcFileName = ""
            SrcFileBase = ""
            SrcFileExt = ""
            SrcFileDir = ""
            SrcPath = ""
            # SourceFile must be a list
            SrcFile = "$(%s)" % self.FileListMacro
        else:
            SrcFileName, SrcFileBase, SrcFileExt = SourceFile.Name, SourceFile.BaseName, SourceFile.Ext
            if SourceFile.Root:
                SrcFileDir = SourceFile.SubDir
                if SrcFileDir == "":
                    SrcFileDir = "."
            else:
                SrcFileDir = "."
            SrcFile = SourceFile.Path
            SrcPath = SourceFile.Dir

        # destination file (the first one)
        if self.DestFileList:
            DestFile = self.DestFileList[0].Path
            DestPath = self.DestFileList[0].Dir
            DestFileName = self.DestFileList[0].Name
            DestFileBase, DestFileExt = self.DestFileList[0].BaseName, self.DestFileList[0].Ext
        else:
            DestFile = ""
            DestPath = ""
            DestFileName = ""
            DestFileBase = ""
            DestFileExt = ""

        BuildRulePlaceholderDict = {
            # source file
            "src"       :   SrcFile,
            "s_path"    :   SrcPath,
            "s_dir"     :   SrcFileDir,
            "s_name"    :   SrcFileName,
            "s_base"    :   SrcFileBase,
            "s_ext"     :   SrcFileExt,
            # destination file
            "dst"       :   DestFile,
            "d_path"    :   DestPath,
            "d_name"    :   DestFileName,
            "d_base"    :   DestFileBase,
            "d_ext"     :   DestFileExt,
        }

        DstFile = []
        for File in self.DestFileList:
            File = string.Template(str(File)).safe_substitute(BuildRulePlaceholderDict)
            File = string.Template(str(File)).safe_substitute(BuildRulePlaceholderDict)
            DstFile.append(PathClass(File, IsBinary=True))

        if DstFile[0] in self.BuildTargets:
            TargetDesc = self.BuildTargets[DstFile[0]]
            TargetDesc.AddInput(SourceFile)
        else:
            CommandList = []
            for CommandString in self.CommandList:
                CommandString = string.Template(CommandString).safe_substitute(BuildRulePlaceholderDict)
                CommandString = string.Template(CommandString).safe_substitute(BuildRulePlaceholderDict)
                CommandList.append(CommandString)
            TargetDesc = TargetDescBlock([SourceFile], DstFile, CommandList, self.ExtraSourceFileList)
            TargetDesc.ListFileMacro = self.ListFileMacro
            TargetDesc.FileListMacro = self.FileListMacro
            TargetDesc.IncListFileMacro = self.IncListFileMacro
            TargetDesc.GenFileListMacro = self.GenFileListMacro
            TargetDesc.GenListFile = self.GenListFile
            TargetDesc.GenIncListFile = self.GenIncListFile
            self.BuildTargets[DstFile[0]] = TargetDesc
        return TargetDesc

## Class for build rules
#
# BuildRule class parses rules defined in a file or passed in by the caller,
# and converts each rule into a FileBuildRule object.
#
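# Illustrative sketch (added by the editor, not part of the original source) of the
# rule text format this parser expects; the file type, tool macro and flags below
# are hypothetical:
#
#   [C-Code-File.COMMON.COMMON]
#       <InputFile>
#           ?.c
#       <OutputFile>
#           $(OUTPUT_DIR)(+)${s_dir}(+)${s_base}.obj
#       <Command.MSFT>
#           "$(SOMETOOL)" $(SOMETOOL_FLAGS) -o ${dst} ${src}
#
# Section headers use [File-Type.Build-Type.Arch], sub-sections use <Name.Family>,
# and "(+)" is replaced with the native path separator while parsing.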
class BuildRule:
    _SectionHeader = "SECTIONHEADER"
    _Section = "SECTION"
    _SubSectionHeader = "SUBSECTIONHEADER"
    _SubSection = "SUBSECTION"
    _InputFile = "INPUTFILE"
    _OutputFile = "OUTPUTFILE"
    _ExtraDependency = "EXTRADEPENDENCY"
    _Command = "COMMAND"
    _UnknownSection = "UNKNOWNSECTION"

    _SubSectionList = [_InputFile, _OutputFile, _Command]

    _PATH_SEP = "(+)"
    _FileTypePattern = re.compile("^[_a-zA-Z][_\-0-9a-zA-Z]*$")
    _BinaryFileRule = FileBuildRule(TAB_DEFAULT_BINARY_FILE, [], [os.path.join("$(OUTPUT_DIR)", "${s_name}")],
                                    ["$(CP) ${src} ${dst}"], [])

    ## Constructor
    #
    #   @param  File                The file containing build rules in a well defined format
    #   @param  Content             The string list of build rules in a well defined format
    #   @param  LineIndex           The line number from which the parsing will begin
    #   @param  SupportedFamily     The list of supported tool chain families
    #
    def __init__(self, File=None, Content=None, LineIndex=0, SupportedFamily=["MSFT", "INTEL", "GCC", "RVCT"]):
        self.RuleFile = File
        # Read build rules from file if it's not None
        if File != None:
            try:
                self.RuleContent = open(File, 'r').readlines()
            except:
                EdkLogger.error("build", FILE_OPEN_FAILURE, ExtraData=File)
        elif Content != None:
            self.RuleContent = Content
        else:
            EdkLogger.error("build", PARAMETER_MISSING, ExtraData="No rule file or string given")

        self.SupportedToolChainFamilyList = SupportedFamily
        self.RuleDatabase = tdict(True, 4)  # {FileExt, ModuleType, Arch, Family : FileBuildRule object}
        self.Ext2FileType = {}              # {ext : file-type}
        self.FileTypeList = set()

        self._LineIndex = LineIndex
        self._State = ""
        self._RuleInfo = tdict(True, 2)     # {toolchain family : {"InputFile": {}, "OutputFile" : [], "Command" : []}}
        self._FileType = ''
        self._BuildTypeList = []
        self._ArchList = []
        self._FamilyList = []
        self._TotalToolChainFamilySet = set()
        self._RuleObjectList = []           # FileBuildRule object list

        self.Parse()

        # some intrinsic rules
        self.RuleDatabase[TAB_DEFAULT_BINARY_FILE, "COMMON", "COMMON", "COMMON"] = self._BinaryFileRule
        self.FileTypeList.add(TAB_DEFAULT_BINARY_FILE)

    ## Parse the build rule strings
    def Parse(self):
        self._State = self._Section
        for Index in range(self._LineIndex, len(self.RuleContent)):
            # Clean up the line and replace path separator with native one
            Line = self.RuleContent[Index].strip().replace(self._PATH_SEP, os.path.sep)
            self.RuleContent[Index] = Line

            # skip empty or comment line
            if Line == "" or Line[0] == "#":
                continue

            # find out section header, enclosed by []
            if Line[0] == '[' and Line[-1] == ']':
                # merge last section information into rule database
                self.EndOfSection()
                self._State = self._SectionHeader
            # find out sub-section header, enclosed by <>
            elif Line[0] == '<' and Line[-1] == '>':
                if self._State != self._UnknownSection:
                    self._State = self._SubSectionHeader

            # call section handler to parse each (sub)section
            self._StateHandler[self._State](self, Index)
        # merge last section information into rule database
        self.EndOfSection()

    ## Parse definitions under a section
    #
    #   @param  LineIndex   The line index of build rule text
    #
    def ParseSection(self, LineIndex):
        pass

    ## Parse definitions under a subsection
    #
    #   @param  LineIndex   The line index of build rule text
    #
    def ParseSubSection(self, LineIndex):
        # currently nothing here
        pass

    ## Placeholder for unsupported sections
    #
    #   @param  LineIndex   The line index of build rule text
    #
    def SkipSection(self, LineIndex):
        pass

    ## Merge the section information just parsed into the rule database
    def EndOfSection(self):
        Database = self.RuleDatabase
        # if there's a specific toolchain family, 'COMMON' doesn't make sense any more
        if len(self._TotalToolChainFamilySet) > 1 and 'COMMON' in self._TotalToolChainFamilySet:
            self._TotalToolChainFamilySet.remove('COMMON')
        for Family in self._TotalToolChainFamilySet:
            Input = self._RuleInfo[Family, self._InputFile]
            Output = self._RuleInfo[Family, self._OutputFile]
            Command = self._RuleInfo[Family, self._Command]
            ExtraDependency = self._RuleInfo[Family, self._ExtraDependency]

            BuildRule = FileBuildRule(self._FileType, Input, Output, Command, ExtraDependency)
            for BuildType in self._BuildTypeList:
                for Arch in self._ArchList:
                    Database[self._FileType, BuildType, Arch, Family] = BuildRule
                    for FileExt in BuildRule.SourceFileExtList:
                        self.Ext2FileType[FileExt] = self._FileType

    ## Parse section header
    #
    #   @param  LineIndex   The line index of build rule text
    #
    def ParseSectionHeader(self, LineIndex):
        self._RuleInfo = tdict(True, 2)
        self._BuildTypeList = []
        self._ArchList = []
        self._FamilyList = []
        self._TotalToolChainFamilySet = set()
        FileType = ''
        RuleNameList = self.RuleContent[LineIndex][1:-1].split(',')
        for RuleName in RuleNameList:
            Arch = 'COMMON'
            BuildType = 'COMMON'
            TokenList = [Token.strip().upper() for Token in RuleName.split('.')]
            # old format: Build.File-Type
            if TokenList[0] == "BUILD":
                if len(TokenList) == 1:
                    EdkLogger.error("build", FORMAT_INVALID, "Invalid rule section",
                                    File=self.RuleFile, Line=LineIndex+1,
                                    ExtraData=self.RuleContent[LineIndex])

                FileType = TokenList[1]
                if FileType == '':
                    EdkLogger.error("build", FORMAT_INVALID, "No file type given",
                                    File=self.RuleFile, Line=LineIndex+1,
                                    ExtraData=self.RuleContent[LineIndex])
                if self._FileTypePattern.match(FileType) == None:
                    EdkLogger.error("build", FORMAT_INVALID, File=self.RuleFile, Line=LineIndex+1,
                                    ExtraData="Only character, number (non-first character), '_' and '-' are allowed in file type")
            # new format: File-Type.Build-Type.Arch
            else:
                if FileType == '':
                    FileType = TokenList[0]
                elif FileType != TokenList[0]:
                    EdkLogger.error("build", FORMAT_INVALID,
                                    "Different file types are not allowed in the same rule section",
                                    File=self.RuleFile, Line=LineIndex+1,
                                    ExtraData=self.RuleContent[LineIndex])
                if len(TokenList) > 1:
                    BuildType = TokenList[1]
                if len(TokenList) > 2:
                    Arch = TokenList[2]
            if BuildType not in self._BuildTypeList:
                self._BuildTypeList.append(BuildType)
            if Arch not in self._ArchList:
                self._ArchList.append(Arch)

        if 'COMMON' in self._BuildTypeList and len(self._BuildTypeList) > 1:
            EdkLogger.error("build", FORMAT_INVALID,
                            "Specific build types must not be mixed with common one",
                            File=self.RuleFile, Line=LineIndex+1,
                            ExtraData=self.RuleContent[LineIndex])
        if 'COMMON' in self._ArchList and len(self._ArchList) > 1:
            EdkLogger.error("build", FORMAT_INVALID,
                            "Specific ARCH must not be mixed with common one",
                            File=self.RuleFile, Line=LineIndex+1,
                            ExtraData=self.RuleContent[LineIndex])

        self._FileType = FileType
        self._State = self._Section
        self.FileTypeList.add(FileType)

    ## Parse sub-section header
    #
    #   @param  LineIndex   The line index of build rule text
    #
    def ParseSubSectionHeader(self, LineIndex):
        SectionType = ""
        List = self.RuleContent[LineIndex][1:-1].split(',')
        FamilyList = []
        for Section in List:
            TokenList = Section.split('.')
            Type = TokenList[0].strip().upper()

            if SectionType == "":
                SectionType = Type
            elif SectionType != Type:
                EdkLogger.error("build", FORMAT_INVALID,
                                "Two different section types are not allowed in the same sub-section",
                                File=self.RuleFile, Line=LineIndex+1,
                                ExtraData=self.RuleContent[LineIndex])

            if len(TokenList) > 1:
                Family = TokenList[1].strip().upper()
            else:
                Family = "COMMON"

            if Family not in FamilyList:
                FamilyList.append(Family)

        self._FamilyList = FamilyList
        self._TotalToolChainFamilySet.update(FamilyList)
        self._State = SectionType.upper()
        if 'COMMON' in FamilyList and len(FamilyList) > 1:
            EdkLogger.error("build", FORMAT_INVALID,
                            "Specific tool chain family should not be mixed with general one",
                            File=self.RuleFile, Line=LineIndex+1,
                            ExtraData=self.RuleContent[LineIndex])
        if self._State not in self._StateHandler:
            EdkLogger.error("build", FORMAT_INVALID, File=self.RuleFile, Line=LineIndex+1,
                            ExtraData="Unknown subsection: %s" % self.RuleContent[LineIndex])

    ## Parse <InputFile> sub-section
    #
    #   @param  LineIndex   The line index of build rule text
    #
    def ParseInputFile(self, LineIndex):
        FileList = [File.strip() for File in self.RuleContent[LineIndex].split(",")]
        for ToolChainFamily in self._FamilyList:
            InputFiles = self._RuleInfo[ToolChainFamily, self._State]
            if InputFiles == None:
                InputFiles = []
                self._RuleInfo[ToolChainFamily, self._State] = InputFiles
            InputFiles.extend(FileList)

    ## Parse <ExtraDependency> sub-section
    #
    #   @param  LineIndex   The line index of build rule text
    #
    def ParseCommon(self, LineIndex):
        for ToolChainFamily in self._FamilyList:
            Items = self._RuleInfo[ToolChainFamily, self._State]
            if Items == None:
                Items = []
                self._RuleInfo[ToolChainFamily, self._State] = Items
            Items.append(self.RuleContent[LineIndex])

    ## Get a build rule via [] operator
    #
    #   @param  FileExt             The extension of a file
    #   @param  ToolChainFamily     The tool chain family name
    #   @param  BuildVersion        The build version number. "*" means any rule
    #                               is applicable.
    #
    #   @retval FileType            The file type string
    #   @retval FileBuildRule       The object of FileBuildRule
    #
    # Key = (FileExt, ModuleType, Arch, ToolChainFamily)
    def __getitem__(self, Key):
        if not Key:
            return None

        if Key[0] in self.Ext2FileType:
            Type = self.Ext2FileType[Key[0]]
        elif Key[0].upper() in self.FileTypeList:
            Type = Key[0].upper()
        else:
            return None

        if len(Key) > 1:
            Key = (Type,) + Key[1:]
        else:
            Key = (Type,)
        return self.RuleDatabase[Key]

    _StateHandler = {
        _SectionHeader      :   ParseSectionHeader,
        _Section            :   ParseSection,
        _SubSectionHeader   :   ParseSubSectionHeader,
        _SubSection         :   ParseSubSection,
        _InputFile          :   ParseInputFile,
        _OutputFile         :   ParseCommon,
        _ExtraDependency    :   ParseCommon,
        _Command            :   ParseCommon,
        _UnknownSection     :   SkipSection,
    }

# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
if __name__ == '__main__':
    import sys
    EdkLogger.Initialize()
    if len(sys.argv) > 1:
        Br = BuildRule(sys.argv[1])
        print str(Br[".c", "DXE_DRIVER", "IA32", "MSFT"][1])
        print
        print str(Br[".c", "DXE_DRIVER", "IA32", "INTEL"][1])
        print
        print str(Br[".c", "DXE_DRIVER", "IA32", "GCC"][1])
        print
        print str(Br[".ac", "ACPI_TABLE", "IA32", "MSFT"][1])
        print
        print str(Br[".h", "ACPI_TABLE", "IA32", "INTEL"][1])
        print
        print str(Br[".ac", "ACPI_TABLE", "IA32", "MSFT"][1])
        print
        print str(Br[".s", "SEC", "IPF", "COMMON"][1])
        print
        print str(Br[".s", "SEC"][1])
1931  BaseTools/Source/Python/AutoGen/GenC.py  Normal file
File diff suppressed because it is too large
441  BaseTools/Source/Python/AutoGen/GenDepex.py  Normal file
@@ -0,0 +1,441 @@
## @file
# This file is used to generate the DEPEX file for a module's dependency expression
#
# Copyright (c) 2007, Intel Corporation
# All rights reserved. This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

## Import Modules
#
import sys
import os
import re
import traceback

from StringIO import StringIO
from struct import pack
from Common.BuildToolError import *
from Common.Misc import SaveFileOnChange
from Common.Misc import GuidStructureStringToGuidString
from Common import EdkLogger as EdkLogger


## Regular expression for matching "DEPENDENCY_START ... DEPENDENCY_END"
gStartClosePattern = re.compile(".*DEPENDENCY_START(.+)DEPENDENCY_END.*", re.S)

## Mapping between module type and EFI phase
gType2Phase = {
    "BASE"              :   None,
    "SEC"               :   "PEI",
    "PEI_CORE"          :   "PEI",
    "PEIM"              :   "PEI",
    "DXE_CORE"          :   "DXE",
    "DXE_DRIVER"        :   "DXE",
    "DXE_SMM_DRIVER"    :   "DXE",
    "DXE_RUNTIME_DRIVER":   "DXE",
    "DXE_SAL_DRIVER"    :   "DXE",
    "UEFI_DRIVER"       :   "DXE",
    "UEFI_APPLICATION"  :   "DXE",
    "SMM_DRIVER"        :   "DXE",
}

## Convert dependency expression string into EFI internal representation
#
# DependencyExpression class is used to parse a dependency expression string and
# convert it into its binary form.
#
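# Illustrative example (added by the editor, not part of the original source): a
# dependency expression of the form
#   <guid> AND <guid> END
# where each <guid> is a GUID in C-structure notation, e.g. {0x1e5668e2, 0x8481, ...},
# is tokenized, rewritten in postfix notation as
#   PUSH <guid> PUSH <guid> AND END
# and finally emitted as the corresponding opcode bytes with each GUID packed into
# its 16-byte binary form.
#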
class DependencyExpression:

    ArchProtocols = set([
                        '665e3ff6-46cc-11d4-9a38-0090273fc14d',     #   'gEfiBdsArchProtocolGuid'
                        '26baccb1-6f42-11d4-bce7-0080c73c8881',     #   'gEfiCpuArchProtocolGuid'
                        '26baccb2-6f42-11d4-bce7-0080c73c8881',     #   'gEfiMetronomeArchProtocolGuid'
                        '1da97072-bddc-4b30-99f1-72a0b56fff2a',     #   'gEfiMonotonicCounterArchProtocolGuid'
                        '27cfac87-46cc-11d4-9a38-0090273fc14d',     #   'gEfiRealTimeClockArchProtocolGuid'
                        '27cfac88-46cc-11d4-9a38-0090273fc14d',     #   'gEfiResetArchProtocolGuid'
                        'b7dfb4e1-052f-449f-87be-9818fc91b733',     #   'gEfiRuntimeArchProtocolGuid'
                        'a46423e3-4617-49f1-b9ff-d1bfa9115839',     #   'gEfiSecurityArchProtocolGuid'
                        '26baccb3-6f42-11d4-bce7-0080c73c8881',     #   'gEfiTimerArchProtocolGuid'
                        '6441f818-6362-4e44-b570-7dba31dd2453',     #   'gEfiVariableWriteArchProtocolGuid'
                        '1e5668e2-8481-11d4-bcf1-0080c73c8881',     #   'gEfiVariableArchProtocolGuid'
                        '665e3ff5-46cc-11d4-9a38-0090273fc14d'      #   'gEfiWatchdogTimerArchProtocolGuid'
                        ]
                        )

    OpcodePriority = {
        "AND"   :   1,
        "OR"    :   1,
        "NOT"   :   2,
        # "SOR"   :   9,
        # "BEFORE":   9,
        # "AFTER" :   9,
    }

    Opcode = {
        "PEI"   :   {
            "PUSH"  :   0x02,
            "AND"   :   0x03,
            "OR"    :   0x04,
            "NOT"   :   0x05,
            "TRUE"  :   0x06,
            "FALSE" :   0x07,
            "END"   :   0x08
        },

        "DXE"   :   {
            "BEFORE":   0x00,
            "AFTER" :   0x01,
            "PUSH"  :   0x02,
            "AND"   :   0x03,
            "OR"    :   0x04,
            "NOT"   :   0x05,
            "TRUE"  :   0x06,
            "FALSE" :   0x07,
            "END"   :   0x08,
            "SOR"   :   0x09
        }
    }

    # all supported op codes and operands
    SupportedOpcode = ["BEFORE", "AFTER", "PUSH", "AND", "OR", "NOT", "END", "SOR"]
    SupportedOperand = ["TRUE", "FALSE"]

    OpcodeWithSingleOperand = ['NOT', 'BEFORE', 'AFTER']
    OpcodeWithTwoOperand = ['AND', 'OR']

    # op codes that should not be the last one
    NonEndingOpcode = ["AND", "OR", "NOT", 'SOR']
    # op codes that must not be present at the same time
    ExclusiveOpcode = ["BEFORE", "AFTER"]
    # op codes that should be the first one, if present
    AboveAllOpcode = ["SOR", "BEFORE", "AFTER"]

    #
    # open and close braces must be taken as individual tokens
    #
    TokenPattern = re.compile("(\(|\)|\{[^{}]+\{?[^{}]+\}?[ ]*\}|\w+)")

    ## Constructor
    #
    #   @param  Expression  The list or string of dependency expression
    #   @param  ModuleType  The type of the module using the dependency expression
    #
    def __init__(self, Expression, ModuleType, Optimize=False):
        self.ModuleType = ModuleType
        self.Phase = gType2Phase[ModuleType]
        if type(Expression) == type([]):
            self.ExpressionString = " ".join(Expression)
            self.TokenList = Expression
        else:
            self.ExpressionString = Expression
            self.GetExpressionTokenList()

        self.PostfixNotation = []
        self.OpcodeList = []

        self.GetPostfixNotation()
        self.ValidateOpcode()

        EdkLogger.debug(EdkLogger.DEBUG_8, repr(self))
        if Optimize:
            self.Optimize()
            EdkLogger.debug(EdkLogger.DEBUG_8, "\n    Optimized: " + repr(self))

    def __str__(self):
        return " ".join(self.TokenList)

    def __repr__(self):
        WellForm = ''
        for Token in self.PostfixNotation:
            if Token in self.SupportedOpcode:
                WellForm += "\n    " + Token
            else:
                WellForm += ' ' + Token
        return WellForm

    ## Split the expression string into token list
    def GetExpressionTokenList(self):
        self.TokenList = self.TokenPattern.findall(self.ExpressionString)

    ## Convert token list into postfix notation
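    #
    # Note (added by the editor, not part of the original source): this is essentially
    # a shunting-yard style conversion. Operands (a "PUSH" plus the GUID, or TRUE/FALSE)
    # go straight to the output list, while operators wait on a stack and are popped
    # according to OpcodePriority, so that, for example,
    #   ( A OR B ) AND C   becomes   PUSH A PUSH B OR PUSH C AND END
    #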
    def GetPostfixNotation(self):
        Stack = []
        LastToken = ''
        for Token in self.TokenList:
            if Token == "(":
                if LastToken not in self.SupportedOpcode + ['(', '', None]:
                    EdkLogger.error("GenDepex", PARSER_ERROR, "Invalid dependency expression: missing operator before open parentheses",
                                    ExtraData="Near %s" % LastToken)
                Stack.append(Token)
            elif Token == ")":
                if '(' not in Stack:
                    EdkLogger.error("GenDepex", PARSER_ERROR, "Invalid dependency expression: mismatched parentheses",
                                    ExtraData=str(self))
                elif LastToken in self.SupportedOpcode + ['', None]:
                    EdkLogger.error("GenDepex", PARSER_ERROR, "Invalid dependency expression: missing operand before close parentheses",
                                    ExtraData="Near %s" % LastToken)
                while len(Stack) > 0:
                    if Stack[-1] == '(':
                        Stack.pop()
                        break
                    self.PostfixNotation.append(Stack.pop())
            elif Token in self.OpcodePriority:
                if Token == "NOT":
                    if LastToken not in self.SupportedOpcode + ['(', '', None]:
                        EdkLogger.error("GenDepex", PARSER_ERROR, "Invalid dependency expression: missing operator before NOT",
                                        ExtraData="Near %s" % LastToken)
                elif LastToken in self.SupportedOpcode + ['(', '', None]:
                    EdkLogger.error("GenDepex", PARSER_ERROR, "Invalid dependency expression: missing operand before " + Token,
                                    ExtraData="Near %s" % LastToken)

                while len(Stack) > 0:
                    if Stack[-1] == "(" or self.OpcodePriority[Token] >= self.OpcodePriority[Stack[-1]]:
                        break
                    self.PostfixNotation.append(Stack.pop())
                Stack.append(Token)
                self.OpcodeList.append(Token)
            else:
                if Token not in self.SupportedOpcode:
                    # not OP, take it as GUID
                    if LastToken not in self.SupportedOpcode + ['(', '', None]:
                        EdkLogger.error("GenDepex", PARSER_ERROR, "Invalid dependency expression: missing operator before %s" % Token,
                                        ExtraData="Near %s" % LastToken)
                    if len(self.OpcodeList) == 0 or self.OpcodeList[-1] not in self.ExclusiveOpcode:
                        if Token not in self.SupportedOperand:
                            self.PostfixNotation.append("PUSH")
                # check if OP is valid in this phase
                elif Token in self.Opcode[self.Phase]:
                    if Token == "END":
                        break
                    self.OpcodeList.append(Token)
                else:
                    EdkLogger.error("GenDepex", PARSER_ERROR,
                                    "Opcode=%s is not supported in %s stage" % (Token, self.Phase),
                                    ExtraData=str(self))
                self.PostfixNotation.append(Token)
            LastToken = Token

        # there should not be parentheses in Stack
        if '(' in Stack or ')' in Stack:
            EdkLogger.error("GenDepex", PARSER_ERROR, "Invalid dependency expression: mismatched parentheses",
                            ExtraData=str(self))
        while len(Stack) > 0:
            self.PostfixNotation.append(Stack.pop())
        if self.PostfixNotation[-1] != 'END':
            self.PostfixNotation.append("END")

    ## Validate the dependency expression
    def ValidateOpcode(self):
        for Op in self.AboveAllOpcode:
            if Op in self.PostfixNotation:
                if Op != self.PostfixNotation[0]:
                    EdkLogger.error("GenDepex", PARSER_ERROR, "%s should be the first opcode in the expression" % Op,
                                    ExtraData=str(self))
                if len(self.PostfixNotation) < 3:
                    EdkLogger.error("GenDepex", PARSER_ERROR, "Missing operand for %s" % Op,
                                    ExtraData=str(self))
        for Op in self.ExclusiveOpcode:
            if Op in self.OpcodeList:
                if len(self.OpcodeList) > 1:
                    EdkLogger.error("GenDepex", PARSER_ERROR, "%s should be the only opcode in the expression" % Op,
                                    ExtraData=str(self))
                if len(self.PostfixNotation) < 3:
                    EdkLogger.error("GenDepex", PARSER_ERROR, "Missing operand for %s" % Op,
                                    ExtraData=str(self))
        if self.TokenList[-1] != 'END' and self.TokenList[-1] in self.NonEndingOpcode:
            EdkLogger.error("GenDepex", PARSER_ERROR, "Extra %s at the end of the dependency expression" % self.TokenList[-1],
                            ExtraData=str(self))
        if self.TokenList[-1] == 'END' and self.TokenList[-2] in self.NonEndingOpcode:
            EdkLogger.error("GenDepex", PARSER_ERROR, "Extra %s at the end of the dependency expression" % self.TokenList[-2],
                            ExtraData=str(self))
        if "END" in self.TokenList and "END" != self.TokenList[-1]:
            EdkLogger.error("GenDepex", PARSER_ERROR, "Extra expressions after END",
                            ExtraData=str(self))

    ## Simply optimize the dependency expression by removing duplicated operands
    def Optimize(self):
        ValidOpcode = list(set(self.OpcodeList))
        if len(ValidOpcode) != 1 or ValidOpcode[0] not in ['AND', 'OR']:
            return
        Op = ValidOpcode[0]
        NewOperand = []
        AllOperand = set()
        for Token in self.PostfixNotation:
            if Token in self.SupportedOpcode or Token in NewOperand:
                continue
            AllOperand.add(Token)
            if Token == 'TRUE':
                if Op == 'AND':
                    continue
                else:
                    NewOperand.append(Token)
                    break
            elif Token == 'FALSE':
                if Op == 'OR':
                    continue
                else:
                    NewOperand.append(Token)
                    break
            NewOperand.append(Token)

        # don't generate depex if only TRUE operand left
        if self.ModuleType == 'PEIM' and len(NewOperand) == 1 and NewOperand[0] == 'TRUE':
            self.PostfixNotation = []
            return

        # don't generate depex if all operands are architecture protocols
        if self.ModuleType in ['UEFI_DRIVER', 'DXE_DRIVER', 'DXE_RUNTIME_DRIVER', 'DXE_SAL_DRIVER', 'DXE_SMM_DRIVER'] and \
           Op == 'AND' and \
           self.ArchProtocols == set([GuidStructureStringToGuidString(Guid) for Guid in AllOperand]):
            self.PostfixNotation = []
            return

        if len(NewOperand) == 0:
            self.TokenList = list(AllOperand)
        else:
            self.TokenList = []
            while True:
                self.TokenList.append(NewOperand.pop(0))
                if NewOperand == []:
                    break
                self.TokenList.append(Op)
        self.PostfixNotation = []
        self.GetPostfixNotation()


    ## Convert a GUID value in C structure format into its binary form
    #
    #   @param  Guid    The GUID value in C structure format
    #
    #   @retval array   The byte array representing the GUID value
    #
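    # Illustrative example (added by the editor, not part of the original source):
    # a GUID written as
    #   {0x1e5668e2, 0x8481, 0x11d4, {0xbc, 0xf1, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81}}
    # yields exactly 11 comma-separated values once the braces and spaces are
    # stripped, which pack("1I2H8B", ...) turns into the 16-byte binary GUID using
    # the host's native byte order.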
    def GetGuidValue(self, Guid):
        GuidValueString = Guid.replace("{", "").replace("}", "").replace(" ", "")
        GuidValueList = GuidValueString.split(",")
        if len(GuidValueList) != 11:
            EdkLogger.error("GenDepex", PARSER_ERROR, "Invalid GUID value string or opcode: %s" % Guid)
        return pack("1I2H8B", *(int(value, 16) for value in GuidValueList))

    ## Save the binary form of dependency expression in file
    #
    #   @param  File    The path of file. If None is given, put the data on console
    #
    #   @retval True    If the file doesn't exist or file is changed
    #   @retval False   If file exists and is not changed.
    #
    def Generate(self, File=None):
        Buffer = StringIO()
        if len(self.PostfixNotation) == 0:
            return False

        for Item in self.PostfixNotation:
            if Item in self.Opcode[self.Phase]:
                Buffer.write(pack("B", self.Opcode[self.Phase][Item]))
            elif Item in self.SupportedOpcode:
                EdkLogger.error("GenDepex", FORMAT_INVALID,
                                "Opcode [%s] is not expected in %s phase" % (Item, self.Phase),
                                ExtraData=self.ExpressionString)
            else:
                Buffer.write(self.GetGuidValue(Item))

        FilePath = ""
        FileChangeFlag = True
        if File == None:
            sys.stdout.write(Buffer.getvalue())
            FilePath = "STDOUT"
        else:
            FileChangeFlag = SaveFileOnChange(File, Buffer.getvalue(), True)

        Buffer.close()
        return FileChangeFlag

versionNumber = "0.04"
__version__ = "%prog Version " + versionNumber
__copyright__ = "Copyright (c) 2007-2008, Intel Corporation All rights reserved."
__usage__ = "%prog [options] [dependency_expression_file]"

## Parse command line options
#
#   @retval OptionParser
#
def GetOptions():
    from optparse import OptionParser

    Parser = OptionParser(description=__copyright__, version=__version__, usage=__usage__)

    Parser.add_option("-o", "--output", dest="OutputFile", default=None, metavar="FILE",
                      help="Specify the name of depex file to be generated")
    Parser.add_option("-t", "--module-type", dest="ModuleType", default=None,
                      help="The type of module for which the dependency expression serves")
    Parser.add_option("-e", "--dependency-expression", dest="Expression", default="",
                      help="The string of dependency expression. If this option is present, the input file will be ignored.")
    Parser.add_option("-m", "--optimize", dest="Optimize", default=False, action="store_true",
                      help="Do some simple optimization on the expression.")
    Parser.add_option("-v", "--verbose", dest="verbose", default=False, action="store_true",
                      help="build with verbose information")
    Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
    Parser.add_option("-q", "--quiet", dest="quiet", default=False, action="store_true",
                      help="build with little information")

    return Parser.parse_args()


## Entrance method
#
#   @retval 0   Tool was successful
#   @retval 1   Tool failed
#
def Main():
    EdkLogger.Initialize()
    Option, Input = GetOptions()

    # Set log level
    if Option.quiet:
        EdkLogger.SetLevel(EdkLogger.QUIET)
    elif Option.verbose:
        EdkLogger.SetLevel(EdkLogger.VERBOSE)
    elif Option.debug != None:
        EdkLogger.SetLevel(Option.debug + 1)
    else:
        EdkLogger.SetLevel(EdkLogger.INFO)

    try:
        if Option.ModuleType == None or Option.ModuleType not in gType2Phase:
            EdkLogger.error("GenDepex", OPTION_MISSING, "Module type is not specified or supported")

        DxsFile = ''
        if len(Input) > 0 and Option.Expression == "":
            DxsFile = Input[0]
            DxsString = open(DxsFile, 'r').read().replace("\n", " ").replace("\r", " ")
            DxsString = gStartClosePattern.sub("\\1", DxsString)
        elif Option.Expression != "":
            if Option.Expression[0] == '"':
                DxsString = Option.Expression[1:-1]
            else:
                DxsString = Option.Expression
        else:
            EdkLogger.error("GenDepex", OPTION_MISSING, "No expression string or file given")

        Dpx = DependencyExpression(DxsString, Option.ModuleType, Option.Optimize)
        if Option.OutputFile != None:
            Dpx.Generate(Option.OutputFile)
        else:
            Dpx.Generate()
    except BaseException, X:
        EdkLogger.quiet("")
        if Option != None and Option.debug != None:
            EdkLogger.quiet(traceback.format_exc())
        else:
            EdkLogger.quiet(str(X))
        return 1

    return 0

if __name__ == '__main__':
    sys.exit(Main())
1389  BaseTools/Source/Python/AutoGen/GenMake.py  Normal file
File diff suppressed because it is too large
532  BaseTools/Source/Python/AutoGen/StrGather.py  Normal file
@@ -0,0 +1,532 @@
# Copyright (c) 2007, Intel Corporation
# All rights reserved. This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

#
# This file is used to parse a strings file and create or add to a string database file.
#

##
# Import Modules
#
import re
import Common.EdkLogger as EdkLogger
from Common.BuildToolError import *
from UniClassObject import *

##
# Static definitions
#
EFI_HII_SIBT_END = '0x00'
EFI_HII_SIBT_STRING_SCSU = '0x10'
EFI_HII_SIBT_STRING_SCSU_FONT = '0x11'
EFI_HII_SIBT_STRINGS_SCSU = '0x12'
EFI_HII_SIBT_STRINGS_SCSU_FONT = '0x13'
EFI_HII_SIBT_STRING_UCS2 = '0x14'
EFI_HII_SIBT_STRING_UCS2_FONT = '0x15'
EFI_HII_SIBT_STRINGS_UCS2 = '0x16'
EFI_HII_SIBT_STRINGS_UCS2_FONT = '0x17'
EFI_HII_SIBT_DUPLICATE = '0x20'
EFI_HII_SIBT_SKIP2 = '0x21'
EFI_HII_SIBT_SKIP1 = '0x22'
EFI_HII_SIBT_EXT1 = '0x30'
EFI_HII_SIBT_EXT2 = '0x31'
EFI_HII_SIBT_EXT4 = '0x32'
EFI_HII_SIBT_FONT = '0x40'

EFI_HII_PACKAGE_STRINGS = '0x04'
EFI_HII_PACKAGE_FORM = '0x02'

StringPackageType = EFI_HII_PACKAGE_STRINGS
StringPackageForm = EFI_HII_PACKAGE_FORM
StringBlockType = EFI_HII_SIBT_STRING_UCS2
StringSkipType = EFI_HII_SIBT_SKIP2

HexHeader = '0x'

COMMENT = '// '
DEFINE_STR = '#define'
COMMENT_DEFINE_STR = COMMENT + DEFINE_STR
NOT_REFERENCED = 'not referenced'
COMMENT_NOT_REFERENCED = ' ' + COMMENT + NOT_REFERENCED
CHAR_ARRAY_DEFIN = 'unsigned char'
COMMON_FILE_NAME = 'Strings'
OFFSET = 'offset'
STRING = 'string'
TO = 'to'
STRING_TOKEN = re.compile('STRING_TOKEN *\(([A-Z0-9_]+) *\)', re.MULTILINE | re.UNICODE)

EFI_HII_ARRAY_SIZE_LENGTH = 4
EFI_HII_PACKAGE_HEADER_LENGTH = 4
EFI_HII_HDR_SIZE_LENGTH = 4
EFI_HII_STRING_OFFSET_LENGTH = 4
EFI_STRING_ID = 1
EFI_STRING_ID_LENGTH = 2
EFI_HII_LANGUAGE_WINDOW = 0
EFI_HII_LANGUAGE_WINDOW_LENGTH = 2
EFI_HII_LANGUAGE_WINDOW_NUMBER = 16
EFI_HII_STRING_PACKAGE_HDR_LENGTH = EFI_HII_PACKAGE_HEADER_LENGTH + EFI_HII_HDR_SIZE_LENGTH + EFI_HII_STRING_OFFSET_LENGTH + EFI_HII_LANGUAGE_WINDOW_LENGTH * EFI_HII_LANGUAGE_WINDOW_NUMBER + EFI_STRING_ID_LENGTH
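# Note (added by the editor, not part of the original source): with the values
# above this works out to 4 + 4 + 4 + 2 * 16 + 2 = 46 bytes, i.e. the fixed-size
# portion of the UEFI string package header before the variable-length language
# name string.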

H_C_FILE_HEADER = ['//', \
                   '//  DO NOT EDIT -- auto-generated file', \
                   '//', \
                   '//  This file is generated by the StrGather utility', \
                   '//']
LANGUAGE_NAME_STRING_NAME = '$LANGUAGE_NAME'
PRINTABLE_LANGUAGE_NAME_STRING_NAME = '$PRINTABLE_LANGUAGE_NAME'

## Convert a decimal number to a hex string
#
# Convert a decimal number to a formatted hex string with the given number of digits
# The default digit count is 8
# The hex string starts with "0x"
# DecToHexStr(1000) is '0x000003E8'
# DecToHexStr(1000, 6) is '0x0003E8'
#
# @param Dec:    The number in decimal format
# @param Digit:  The needed digits of the hex string
#
# @retval: The formatted hex string
#
def DecToHexStr(Dec, Digit = 8):
    return eval("'0x%0" + str(Digit) + "X' % int(Dec)")

## Convert a decimal number to a hex list
#
# Convert a decimal number to a formatted hex list with the given number of digits
# The default digit count is 8
# DecToHexList(1000) is ['0xE8', '0x03', '0x00', '0x00']
# DecToHexList(1000, 6) is ['0xE8', '0x03', '0x00']
#
# @param Dec:    The number in decimal format
# @param Digit:  The needed digits of the hex list
#
# @retval: A list of formatted hex strings
#
def DecToHexList(Dec, Digit = 8):
    Hex = eval("'%0" + str(Digit) + "X' % int(Dec)" )
    List = []
    for Bit in range(Digit - 2, -1, -2):
        List.append(HexHeader + Hex[Bit:Bit + 2])
    return List

## Convert an ASCII string to a hex list
#
# Convert an ASCII string to a formatted hex list
# AscToHexList('en-US') is ['0x65', '0x6E', '0x2D', '0x55', '0x53']
#
# @param Ascii:  The ASCII string
#
# @retval: A list of formatted hex strings
#
def AscToHexList(Ascii):
    List = []
    for Item in Ascii:
        List.append('0x%2X' % ord(Item))

    return List

## Create header of .h file
#
# Create a header of .h file
#
# @param BaseName: The basename of strings
#
# @retval Str: A string for .h file header
#
def CreateHFileHeader(BaseName):
    Str = ''
    for Item in H_C_FILE_HEADER:
        Str = WriteLine(Str, Item)
    Str = WriteLine(Str, '#ifndef _' + BaseName.upper() + '_STRINGS_DEFINE_H_')
    Str = WriteLine(Str, '#define _' + BaseName.upper() + '_STRINGS_DEFINE_H_')
    return Str

## Create content of .h file
#
# Create content of .h file
#
# @param BaseName: The basename of strings
# @param UniObjectClass: A UniObjectClass instance
#
# @retval Str: A string of .h file content
#
def CreateHFileContent(BaseName, UniObjectClass):
    Str = ''
    ValueStartPtr = 60
    Line = COMMENT_DEFINE_STR + ' ' + LANGUAGE_NAME_STRING_NAME + ' ' * (ValueStartPtr - len(DEFINE_STR + LANGUAGE_NAME_STRING_NAME)) + DecToHexStr(0, 4) + COMMENT_NOT_REFERENCED
    Str = WriteLine(Str, Line)
    Line = COMMENT_DEFINE_STR + ' ' + PRINTABLE_LANGUAGE_NAME_STRING_NAME + ' ' * (ValueStartPtr - len(DEFINE_STR + PRINTABLE_LANGUAGE_NAME_STRING_NAME)) + DecToHexStr(1, 4) + COMMENT_NOT_REFERENCED
    Str = WriteLine(Str, Line)
    for Index in range(2, len(UniObjectClass.OrderedStringList[UniObjectClass.LanguageDef[0][0]])):
        StringItem = UniObjectClass.OrderedStringList[UniObjectClass.LanguageDef[0][0]][Index]
        Name = StringItem.StringName
        Token = StringItem.Token
        Referenced = StringItem.Referenced
        if Name != None:
            Line = ''
            if Referenced == True:
                Line = DEFINE_STR + ' ' + Name + ' ' * (ValueStartPtr - len(DEFINE_STR + Name)) + DecToHexStr(Token, 4)
            else:
                Line = COMMENT_DEFINE_STR + ' ' + Name + ' ' * (ValueStartPtr - len(DEFINE_STR + Name)) + DecToHexStr(Token, 4) + COMMENT_NOT_REFERENCED
            Str = WriteLine(Str, Line)

    Str = WriteLine(Str, '')
    Str = WriteLine(Str, 'extern unsigned char ' + BaseName + 'Strings[];')
    return Str

## Create a complete .h file
#
# Create a complete .h file with file header and file content
#
# @param BaseName: The basename of strings
# @param UniObjectClass: A UniObjectClass instance
#
# @retval Str: A string of complete .h file
#
def CreateHFile(BaseName, UniObjectClass):
    HFile = WriteLine('', CreateHFileContent(BaseName, UniObjectClass))

    return HFile

## Create header of .c file
#
# Create a header of .c file
#
# @retval Str: A string for .c file header
#
def CreateCFileHeader():
    Str = ''
    for Item in H_C_FILE_HEADER:
        Str = WriteLine(Str, Item)

    return Str

## Create a formatted string of all items in an array
#
# Use ',' to join each item in an array, and start a new line when reaching the width (default is 16)
#
# @param Array: The array to be formatted
# @param Width: The line length, the default value is set to 16
#
# @retval ArrayItem: A string of all formatted array items
#
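# Illustrative example (added by the editor, not part of the original source):
# CreateArrayItem(['0x01', '0x02', '0x03']) returns the items joined as
# "0x01, 0x02, 0x03," on a single indented line (the trailing comma is kept),
# and longer arrays wrap to a new line after every 16 items.
#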
|
||||
def CreateArrayItem(Array, Width = 16):
|
||||
MaxLength = Width
|
||||
Index = 0
|
||||
Line = ' '
|
||||
ArrayItem = ''
|
||||
|
||||
for Item in Array:
|
||||
if Index < MaxLength:
|
||||
Line = Line + Item + ', '
|
||||
Index = Index + 1
|
||||
else:
|
||||
ArrayItem = WriteLine(ArrayItem, Line)
|
||||
Line = ' ' + Item + ', '
|
||||
Index = 1
|
||||
ArrayItem = Write(ArrayItem, Line.rstrip())
|
||||
|
||||
return ArrayItem
|
||||
|
||||
## CreateCFileStringValue
|
||||
#
|
||||
# Create a line with string value
|
||||
#
|
||||
# @param Value: Value of the string
|
||||
#
|
||||
# @retval Str: A formatted string with string value
|
||||
#
|
||||
|
||||
def CreateCFileStringValue(Value):
|
||||
Value = [StringBlockType] + Value
|
||||
Str = WriteLine('', CreateArrayItem(Value))
|
||||
|
||||
return Str
|
||||
|
||||
|
||||
## Create content of .c file
|
||||
#
|
||||
# Create content of .c file
|
||||
#
|
||||
# @param BaseName: The basename of strings
|
||||
# @param UniObjectClass: A UniObjectClass instance
|
||||
#
|
||||
# @retval Str: A string of .c file content
|
||||
#
|
||||
def CreateCFileContent(BaseName, UniObjectClass, IsCompatibleMode):
|
||||
#
|
||||
# Init array length
|
||||
#
|
||||
TotalLength = EFI_HII_ARRAY_SIZE_LENGTH
|
||||
Str = ''
|
||||
Offset = 0
|
||||
|
||||
#
|
||||
# Create lines for each language's strings
|
||||
#
|
||||
for IndexI in range(len(UniObjectClass.LanguageDef)):
|
||||
Language = UniObjectClass.LanguageDef[IndexI][0]
|
||||
LangPrintName = UniObjectClass.LanguageDef[IndexI][1]
|
||||
|
||||
StrStringValue = ''
|
||||
ArrayLength = 0
|
||||
NumberOfUseOhterLangDef = 0
|
||||
Index = 0
|
||||
for IndexJ in range(1, len(UniObjectClass.OrderedStringList[UniObjectClass.LanguageDef[IndexI][0]])):
|
||||
Item = UniObjectClass.FindByToken(IndexJ, Language)
|
||||
Name = Item.StringName
|
||||
Value = Item.StringValueByteList
|
||||
Referenced = Item.Referenced
|
||||
Token = Item.Token
|
||||
Length = Item.Length
|
||||
UseOtherLangDef = Item.UseOtherLangDef
|
||||
|
||||
if UseOtherLangDef != '' and Referenced:
|
||||
NumberOfUseOhterLangDef = NumberOfUseOhterLangDef + 1
|
||||
Index = Index + 1
|
||||
else:
|
||||
if NumberOfUseOhterLangDef > 0:
|
||||
StrStringValue = WriteLine(StrStringValue, CreateArrayItem([StringSkipType] + DecToHexList(NumberOfUseOhterLangDef, 4)))
|
||||
NumberOfUseOhterLangDef = 0
|
||||
ArrayLength = ArrayLength + 3
|
||||
if Referenced and Item.Token > 0:
|
||||
Index = Index + 1
|
||||
StrStringValue = WriteLine(StrStringValue, "// %s: %s:%s" % (DecToHexStr(Index, 4), Name, DecToHexStr(Token, 4)))
|
||||
StrStringValue = Write(StrStringValue, CreateCFileStringValue(Value))
|
||||
Offset = Offset + Length
|
||||
ArrayLength = ArrayLength + Item.Length + 1 # 1 is for the length of string type
|
||||
|
||||
#
|
||||
# EFI_HII_PACKAGE_HEADER
|
||||
#
|
||||
Offset = EFI_HII_STRING_PACKAGE_HDR_LENGTH + len(Language) + 1
|
||||
ArrayLength = Offset + ArrayLength + 1
|
||||
|
||||
#
|
||||
# Create PACKAGE HEADER
|
||||
#
|
||||
Str = WriteLine(Str, '// PACKAGE HEADER\n')
|
||||
TotalLength = TotalLength + ArrayLength
|
||||
|
||||
List = DecToHexList(ArrayLength, 6) + \
|
||||
[StringPackageType] + \
|
||||
DecToHexList(Offset) + \
|
||||
DecToHexList(Offset) + \
|
||||
DecToHexList(EFI_HII_LANGUAGE_WINDOW, EFI_HII_LANGUAGE_WINDOW_LENGTH * 2) * EFI_HII_LANGUAGE_WINDOW_NUMBER + \
|
||||
DecToHexList(EFI_STRING_ID, 4) + \
|
||||
AscToHexList(Language) + \
|
||||
DecToHexList(0, 2)
|
||||
Str = WriteLine(Str, CreateArrayItem(List, 16) + '\n')
|
||||
|
||||
#
|
||||
# Create PACKAGE DATA
|
||||
#
|
||||
Str = WriteLine(Str, '// PACKAGE DATA\n')
|
||||
Str = Write(Str, StrStringValue)
|
||||
|
||||
#
|
||||
# Add an EFI_HII_SIBT_END at last
|
||||
#
|
||||
Str = WriteLine(Str, ' ' + EFI_HII_SIBT_END + ",")
|
||||
|
||||
#
|
||||
# Create line for string variable name
|
||||
# "unsigned char $(BaseName)Strings[] = {"
|
||||
#
|
||||
AllStr = WriteLine('', CHAR_ARRAY_DEFIN + ' ' + BaseName + COMMON_FILE_NAME + '[] = {\n' )
|
||||
|
||||
#
|
||||
# Create FRAMEWORK_EFI_HII_PACK_HEADER in compatible mode
|
||||
#
|
||||
if IsCompatibleMode:
|
||||
AllStr = WriteLine(AllStr, '// FRAMEWORK PACKAGE HEADER Length')
|
||||
AllStr = WriteLine(AllStr, CreateArrayItem(DecToHexList(TotalLength + 2)) + '\n')
|
||||
AllStr = WriteLine(AllStr, '// FRAMEWORK PACKAGE HEADER Type')
|
||||
AllStr = WriteLine(AllStr, CreateArrayItem(DecToHexList(2, 4)) + '\n')
|
||||
|
||||
#
|
||||
# Create whole array length in UEFI mode
|
||||
#
|
||||
if not IsCompatibleMode:
|
||||
AllStr = WriteLine(AllStr, '// STRGATHER_OUTPUT_HEADER')
|
||||
AllStr = WriteLine(AllStr, CreateArrayItem(DecToHexList(TotalLength)) + '\n')
|
||||
|
||||
#
|
||||
# Join package data
|
||||
#
|
||||
AllStr = Write(AllStr, Str)
|
||||
|
||||
return AllStr
|
||||
|
||||
## Create end of .c file
|
||||
#
|
||||
# Create end of .c file
|
||||
#
|
||||
# @retval Str: A string of .h file end
|
||||
#
|
||||
def CreateCFileEnd():
|
||||
Str = Write('', '};')
|
||||
return Str
|
||||
|
||||
## Create a .c file
|
||||
#
|
||||
# Create a complete .c file
|
||||
#
|
||||
# @param BaseName: The basename of strings
|
||||
# @param UniObjectClass: A UniObjectClass instance
|
||||
#
|
||||
# @retval CFile: A string of complete .c file
|
||||
#
|
||||
def CreateCFile(BaseName, UniObjectClass, IsCompatibleMode):
|
||||
CFile = ''
|
||||
#CFile = WriteLine(CFile, CreateCFileHeader())
|
||||
CFile = WriteLine(CFile, CreateCFileContent(BaseName, UniObjectClass, IsCompatibleMode))
|
||||
CFile = WriteLine(CFile, CreateCFileEnd())
|
||||
return CFile
|
||||
|
||||
## GetFileList
|
||||
#
|
||||
# Get a list for all files
|
||||
#
|
||||
# @param IncludeList: A list of all path to be searched
|
||||
# @param SkipList: A list of all types of file could be skipped
|
||||
#
|
||||
# @retval FileList: A list of all files found
|
||||
#
|
||||
def GetFileList(SourceFileList, IncludeList, SkipList):
|
||||
if IncludeList == None:
|
||||
EdkLogger.error("UnicodeStringGather", AUTOGEN_ERROR, "Include path for unicode file is not defined")
|
||||
|
||||
FileList = []
|
||||
if SkipList == None:
|
||||
SkipList = []
|
||||
|
||||
for File in SourceFileList:
|
||||
for Dir in IncludeList:
|
||||
if not os.path.exists(Dir):
|
||||
continue
|
||||
File = os.path.join(Dir, File.Path)
|
||||
#
|
||||
# Ignore Dir
|
||||
#
|
||||
if os.path.isfile(File) != True:
|
||||
continue
|
||||
#
|
||||
# Ignore file listed in skip list
|
||||
#
|
||||
IsSkip = False
|
||||
for Skip in SkipList:
|
||||
if os.path.splitext(File)[1].upper() == Skip.upper():
|
||||
EdkLogger.verbose("Skipped %s for string token uses search" % File)
|
||||
IsSkip = True
|
||||
break
|
||||
|
||||
if not IsSkip:
|
||||
FileList.append(File)
|
||||
|
||||
break
|
||||
|
||||
return FileList
|
||||
|
||||
## SearchString
|
||||
#
|
||||
# Search whether all string defined in UniObjectClass are referenced
|
||||
# All string used should be set to Referenced
|
||||
#
|
||||
# @param UniObjectClass: Input UniObjectClass
|
||||
# @param FileList: Search path list
|
||||
#
|
||||
# @retval UniObjectClass: UniObjectClass after searched
|
||||
#
|
||||
def SearchString(UniObjectClass, FileList):
|
||||
if FileList == []:
|
||||
return UniObjectClass
|
||||
|
||||
for File in FileList:
|
||||
if os.path.isfile(File):
|
||||
Lines = open(File, 'r')
|
||||
for Line in Lines:
|
||||
StringTokenList = STRING_TOKEN.findall(Line)
|
||||
for StrName in StringTokenList:
|
||||
EdkLogger.debug(EdkLogger.DEBUG_5, "Found string identifier: " + StrName)
|
||||
UniObjectClass.SetStringReferenced(StrName)
|
||||
|
||||
UniObjectClass.ReToken()
|
||||
|
||||
return UniObjectClass
|
||||
## GetStringFiles
#
# This function is used for UEFI 2.1 spec
#
#
def GetStringFiles(UniFilList, SourceFileList, IncludeList, SkipList, BaseName, IsCompatibleMode = False, ShellMode = False):
    Status = True
    ErrorMessage = ''

    if len(UniFilList) > 0:
        if ShellMode:
            #
            # support ISO 639-2 codes in .UNI files of EDK Shell
            #
            Uni = UniFileClassObject(UniFilList, True)
        else:
            Uni = UniFileClassObject(UniFilList, IsCompatibleMode)
    else:
        EdkLogger.error("UnicodeStringGather", AUTOGEN_ERROR, 'No unicode files given')

    FileList = GetFileList(SourceFileList, IncludeList, SkipList)

    Uni = SearchString(Uni, FileList)

    HFile = CreateHFile(BaseName, Uni)
    CFile = CreateCFile(BaseName, Uni, IsCompatibleMode)

    return HFile, CFile
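
# Note: GetStringFiles ties the steps above together: it loads the given .uni files
# into a UniFileClassObject, scans the source files for STRING_TOKEN references,
# re-tokens the string database, and returns the generated .h and .c contents as two
# strings.
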
#
# Write an item
#
def Write(Target, Item):
    return Target + Item

#
# Write an item with a line break
#
def WriteLine(Target, Item):
    return Target + Item + '\n'
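
# Note: both helpers are pure string operations and output is accumulated by rebinding
# the target string, e.g. Write('A', 'B') returns 'AB' and WriteLine('A', 'B') returns
# 'AB\n'.
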
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
if __name__ == '__main__':
    EdkLogger.info('start')

    UniFileList = [
        r'C:\\Edk\\Strings2.uni',
        r'C:\\Edk\\Strings.uni'
    ]

    SrcFileList = []
    for Root, Dirs, Files in os.walk('C:\\Edk'):
        for File in Files:
            SrcFileList.append(File)

    IncludeList = [
        r'C:\\Edk'
    ]

    SkipList = ['.inf', '.uni']
    BaseName = 'DriverSample'
    (h, c) = GetStringFiles(UniFileList, SrcFileList, IncludeList, SkipList, BaseName, True)
    hfile = open('unistring.h', 'w')
    cfile = open('unistring.c', 'w')
    hfile.write(h)
    cfile.write(c)

    EdkLogger.info('end')
530
BaseTools/Source/Python/AutoGen/UniClassObject.py
Normal file
@@ -0,0 +1,530 @@
# Copyright (c) 2007, Intel Corporation
# All rights reserved. This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

#
# This file is used to collect all defined strings in multiple uni files
#

##
# Import Modules
#
import os, codecs, re
import Common.EdkLogger as EdkLogger
from Common.BuildToolError import *
from Common.String import GetLineNo
from Common.Misc import PathClass

##
# Static definitions
#
UNICODE_WIDE_CHAR = u'\\wide'
UNICODE_NARROW_CHAR = u'\\narrow'
UNICODE_NON_BREAKING_CHAR = u'\\nbr'
UNICODE_UNICODE_CR = '\r'
UNICODE_UNICODE_LF = '\n'

NARROW_CHAR = u'\uFFF0'
WIDE_CHAR = u'\uFFF1'
NON_BREAKING_CHAR = u'\uFFF2'
CR = u'\u000D'
LF = u'\u000A'
NULL = u'\u0000'
TAB = u'\t'
BACK_SPLASH = u'\\'

gIncludePattern = re.compile("^#include +[\"<]+([^\"< >]+)[>\"]+$", re.MULTILINE | re.UNICODE)

## Convert a python unicode string to a normal string
#
# Convert a python unicode string to a normal string
# UniToStr(u'I am a string') is 'I am a string'
#
# @param Uni: The python unicode string
#
# @retval: The formatted normal string
#
def UniToStr(Uni):
    return repr(Uni)[2:-1]

## Convert a unicode string to a Hex list
#
# Convert a unicode string to a Hex list
# UniToHexList('ABC') is ['0x41', '0x00', '0x42', '0x00', '0x43', '0x00']
#
# @param Uni: The python unicode string
#
# @retval List: The formatted hex list
#
def UniToHexList(Uni):
    List = []
    for Item in Uni:
        Temp = '%04X' % ord(Item)
        List.append('0x' + Temp[2:4])
        List.append('0x' + Temp[0:2])
    return List
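
# Note: the low byte is appended before the high byte, so the result is the
# little-endian UCS-2 encoding of the string; UniToHexList(u'A') yields
# ['0x41', '0x00'].
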
LangConvTable = {'eng':'en', 'fra':'fr', \
                 'aar':'aa', 'abk':'ab', 'ave':'ae', 'afr':'af', 'aka':'ak', 'amh':'am', \
                 'arg':'an', 'ara':'ar', 'asm':'as', 'ava':'av', 'aym':'ay', 'aze':'az', \
                 'bak':'ba', 'bel':'be', 'bul':'bg', 'bih':'bh', 'bis':'bi', 'bam':'bm', \
                 'ben':'bn', 'bod':'bo', 'bre':'br', 'bos':'bs', 'cat':'ca', 'che':'ce', \
                 'cha':'ch', 'cos':'co', 'cre':'cr', 'ces':'cs', 'chu':'cu', 'chv':'cv', \
                 'cym':'cy', 'dan':'da', 'deu':'de', 'div':'dv', 'dzo':'dz', 'ewe':'ee', \
                 'ell':'el', 'epo':'eo', 'spa':'es', 'est':'et', 'eus':'eu', 'fas':'fa', \
                 'ful':'ff', 'fin':'fi', 'fij':'fj', 'fao':'fo', 'fry':'fy', 'gle':'ga', \
                 'gla':'gd', 'glg':'gl', 'grn':'gn', 'guj':'gu', 'glv':'gv', 'hau':'ha', \
                 'heb':'he', 'hin':'hi', 'hmo':'ho', 'hrv':'hr', 'hat':'ht', 'hun':'hu', \
                 'hye':'hy', 'her':'hz', 'ina':'ia', 'ind':'id', 'ile':'ie', 'ibo':'ig', \
                 'iii':'ii', 'ipk':'ik', 'ido':'io', 'isl':'is', 'ita':'it', 'iku':'iu', \
                 'jpn':'ja', 'jav':'jv', 'kat':'ka', 'kon':'kg', 'kik':'ki', 'kua':'kj', \
                 'kaz':'kk', 'kal':'kl', 'khm':'km', 'kan':'kn', 'kor':'ko', 'kau':'kr', \
                 'kas':'ks', 'kur':'ku', 'kom':'kv', 'cor':'kw', 'kir':'ky', 'lat':'la', \
                 'ltz':'lb', 'lug':'lg', 'lim':'li', 'lin':'ln', 'lao':'lo', 'lit':'lt', \
                 'lub':'lu', 'lav':'lv', 'mlg':'mg', 'mah':'mh', 'mri':'mi', 'mkd':'mk', \
                 'mal':'ml', 'mon':'mn', 'mar':'mr', 'msa':'ms', 'mlt':'mt', 'mya':'my', \
                 'nau':'na', 'nob':'nb', 'nde':'nd', 'nep':'ne', 'ndo':'ng', 'nld':'nl', \
                 'nno':'nn', 'nor':'no', 'nbl':'nr', 'nav':'nv', 'nya':'ny', 'oci':'oc', \
                 'oji':'oj', 'orm':'om', 'ori':'or', 'oss':'os', 'pan':'pa', 'pli':'pi', \
                 'pol':'pl', 'pus':'ps', 'por':'pt', 'que':'qu', 'roh':'rm', 'run':'rn', \
                 'ron':'ro', 'rus':'ru', 'kin':'rw', 'san':'sa', 'srd':'sc', 'snd':'sd', \
                 'sme':'se', 'sag':'sg', 'sin':'si', 'slk':'sk', 'slv':'sl', 'smo':'sm', \
                 'sna':'sn', 'som':'so', 'sqi':'sq', 'srp':'sr', 'ssw':'ss', 'sot':'st', \
                 'sun':'su', 'swe':'sv', 'swa':'sw', 'tam':'ta', 'tel':'te', 'tgk':'tg', \
                 'tha':'th', 'tir':'ti', 'tuk':'tk', 'tgl':'tl', 'tsn':'tn', 'ton':'to', \
                 'tur':'tr', 'tso':'ts', 'tat':'tt', 'twi':'tw', 'tah':'ty', 'uig':'ug', \
                 'ukr':'uk', 'urd':'ur', 'uzb':'uz', 'ven':'ve', 'vie':'vi', 'vol':'vo', \
                 'wln':'wa', 'wol':'wo', 'xho':'xh', 'yid':'yi', 'yor':'yo', 'zha':'za', \
                 'zho':'zh', 'zul':'zu'}

## GetLanguageCode
#
# Check the language code read from .UNI file and convert ISO 639-2 codes to RFC 4646 codes if appropriate
# ISO 639-2 language codes supported in compatibility mode
# RFC 4646 language codes supported in native mode
#
# @param LangName: Language codes read from .UNI file
#
# @retval LangName: Valid language code in RFC 4646 format or None
#
def GetLanguageCode(LangName, IsCompatibleMode, File):
    global LangConvTable

    length = len(LangName)
    if IsCompatibleMode:
        if length == 3 and LangName.isalpha():
            TempLangName = LangConvTable.get(LangName.lower())
            if TempLangName != None:
                return TempLangName
            return LangName
        else:
            EdkLogger.error("Unicode File Parser", FORMAT_INVALID, "Invalid ISO 639-2 language code : %s" % LangName, File)

    if length == 2:
        if LangName.isalpha():
            return LangName
    elif length == 3:
        if LangName.isalpha() and LangConvTable.get(LangName.lower()) == None:
            return LangName
    elif length == 5:
        if LangName[0:2].isalpha() and LangName[2] == '-':
            return LangName
    elif length >= 6:
        if LangName[0:2].isalpha() and LangName[2] == '-':
            return LangName
        if LangName[0:3].isalpha() and LangConvTable.get(LangName.lower()) == None and LangName[3] == '-':
            return LangName

    EdkLogger.error("Unicode File Parser", FORMAT_INVALID, "Invalid RFC 4646 language code : %s" % LangName, File)
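
# Note: in compatible mode only three-letter ISO 639-2 codes are accepted and, where
# possible, mapped to their two-letter RFC 4646 equivalent via LangConvTable, e.g.
# GetLanguageCode('eng', True, File) returns 'en'. In native mode two-letter codes,
# unmapped three-letter codes and region-qualified forms such as 'en-US' are accepted
# as-is; anything else is reported through EdkLogger.error.
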
## StringDefClassObject
#
# A structure for a string definition
#
class StringDefClassObject(object):
    def __init__(self, Name = None, Value = None, Referenced = False, Token = None, UseOtherLangDef = ''):
        self.StringName = ''
        self.StringNameByteList = []
        self.StringValue = ''
        self.StringValueByteList = ''
        self.Token = 0
        self.Referenced = Referenced
        self.UseOtherLangDef = UseOtherLangDef
        self.Length = 0

        if Name != None:
            self.StringName = Name
            self.StringNameByteList = UniToHexList(Name)
        if Value != None:
            self.StringValue = Value + u'\x00'        # Add a NULL at string tail
            self.StringValueByteList = UniToHexList(self.StringValue)
            self.Length = len(self.StringValueByteList)
        if Token != None:
            self.Token = Token

    def __str__(self):
        return repr(self.StringName) + ' ' + \
               repr(self.Token) + ' ' + \
               repr(self.Referenced) + ' ' + \
               repr(self.StringValue) + ' ' + \
               repr(self.UseOtherLangDef)
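
# Note: StringValue always carries an extra trailing NULL character, so Length is the
# size in bytes of the little-endian UCS-2 encoding including that terminating NULL.
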
## UniFileClassObject
#
# A structure for .uni file definition
#
class UniFileClassObject(object):
    def __init__(self, FileList = [], IsCompatibleMode = False):
        self.FileList = FileList
        self.Token = 2
        self.LanguageDef = []                   #[ [u'LanguageIdentifier', u'PrintableName'], ... ]
        self.OrderedStringList = {}             #{ u'LanguageIdentifier' : [StringDefClassObject] }
        self.IsCompatibleMode = IsCompatibleMode

        if len(self.FileList) > 0:
            self.LoadUniFiles(FileList)

    #
    # Get Language definition
    #
    def GetLangDef(self, File, Line):
        Lang = Line.split()
        if len(Lang) != 3:
            try:
                FileIn = codecs.open(File, mode='rb', encoding='utf-16').read()
            except UnicodeError, X:
                EdkLogger.error("build", FILE_READ_FAILURE, "File read failure: %s" % str(X), ExtraData=File);
            except:
                EdkLogger.error("build", FILE_OPEN_FAILURE, ExtraData=File);
            LineNo = GetLineNo(FileIn, Line, False)
            EdkLogger.error("Unicode File Parser", PARSER_ERROR, "Wrong language definition",
                            ExtraData="""%s\n\t*Correct format is like '#langdef eng "English"'""" % Line, File = File, Line = LineNo)
        else:
            LangName = GetLanguageCode(Lang[1], self.IsCompatibleMode, self.File)
            LangPrintName = Lang[2][1:-1]

        IsLangInDef = False
        for Item in self.LanguageDef:
            if Item[0] == LangName:
                IsLangInDef = True
                break;

        if not IsLangInDef:
            self.LanguageDef.append([LangName, LangPrintName])

        #
        # Add language string
        #
        self.AddStringToList(u'$LANGUAGE_NAME', LangName, LangName, 0, True, Index=0)
        self.AddStringToList(u'$PRINTABLE_LANGUAGE_NAME', LangName, LangPrintName, 1, True, Index=1)

        return True

    #
    # Get String name and value
    #
    def GetStringObject(self, Item):
        Name = ''
        Language = ''
        Value = ''

        Name = Item.split()[1]
        LanguageList = Item.split(u'#language ')
        for IndexI in range(len(LanguageList)):
            if IndexI == 0:
                continue
            else:
                Language = LanguageList[IndexI].split()[0]
                Value = LanguageList[IndexI][LanguageList[IndexI].find(u'\"') + len(u'\"') : LanguageList[IndexI].rfind(u'\"')] #.replace(u'\r\n', u'')
                Language = GetLanguageCode(Language, self.IsCompatibleMode, self.File)
                self.AddStringToList(Name, Language, Value)

    #
    # Get include file list and load them
    #
    def GetIncludeFile(self, Item, Dir):
        FileName = Item[Item.find(u'#include ') + len(u'#include ') :Item.find(u' ', len(u'#include '))][1:-1]
        self.LoadUniFile(FileName)

    #
    # Pre-process before parse .uni file
    #
    def PreProcess(self, File):
        if not os.path.exists(File.Path) or not os.path.isfile(File.Path):
            EdkLogger.error("Unicode File Parser", FILE_NOT_FOUND, ExtraData=File.Path)

        Dir = File.Dir
        try:
            FileIn = codecs.open(File.Path, mode='rb', encoding='utf-16').readlines()
        except UnicodeError, X:
            EdkLogger.error("build", FILE_READ_FAILURE, "File read failure: %s" % str(X), ExtraData=File.Path);
        except:
            EdkLogger.error("build", FILE_OPEN_FAILURE, ExtraData=File.Path);

        Lines = []
        #
        # Use unique identifier
        #
        for Line in FileIn:
            Line = Line.strip()
            #
            # Ignore comment line and empty line
            #
            if Line == u'' or Line.startswith(u'//'):
                continue
            Line = Line.replace(u'/langdef', u'#langdef')
            Line = Line.replace(u'/string', u'#string')
            Line = Line.replace(u'/language', u'#language')
            Line = Line.replace(u'/include', u'#include')

            Line = Line.replace(UNICODE_WIDE_CHAR, WIDE_CHAR)
            Line = Line.replace(UNICODE_NARROW_CHAR, NARROW_CHAR)
            Line = Line.replace(UNICODE_NON_BREAKING_CHAR, NON_BREAKING_CHAR)

            Line = Line.replace(u'\\\\', u'\u0006')
            Line = Line.replace(u'\\r\\n', CR + LF)
            Line = Line.replace(u'\\n', CR + LF)
            Line = Line.replace(u'\\r', CR)
            Line = Line.replace(u'\\t', u'\t')
            Line = Line.replace(u'''\"''', u'''"''')
            Line = Line.replace(u'\t', u' ')
            Line = Line.replace(u'\u0006', u'\\')

#            if Line.find(u'\\x'):
#                hex = Line[Line.find(u'\\x') + 2 : Line.find(u'\\x') + 6]
#                hex = "u'\\u" + hex + "'"

            IncList = gIncludePattern.findall(Line)
            if len(IncList) == 1:
                Lines.extend(self.PreProcess(PathClass(str(IncList[0]), Dir)))
                continue

            Lines.append(Line)

        return Lines
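
    # Note: escaped backslashes are first replaced with the placeholder u'\u0006' so
    # that the \r, \n and \t substitutions above cannot match inside them, and the
    # placeholder is turned back into a single backslash afterwards. Lines pulled in
    # through '#include' are expanded recursively at this point.
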
    #
    # Load a .uni file
    #
    def LoadUniFile(self, File = None):
        if File == None:
            EdkLogger.error("Unicode File Parser", PARSER_ERROR, 'No unicode file is given')
        self.File = File
        #
        # Process special char in file
        #
        Lines = self.PreProcess(File)

        #
        # Get Unicode Information
        #
        for IndexI in range(len(Lines)):
            Line = Lines[IndexI]
            if (IndexI + 1) < len(Lines):
                SecondLine = Lines[IndexI + 1]
            if (IndexI + 2) < len(Lines):
                ThirdLine = Lines[IndexI + 2]

            #
            # Get Language def information
            #
            if Line.find(u'#langdef ') >= 0:
                self.GetLangDef(File, Line)
                continue

            Name = ''
            Language = ''
            Value = ''
            #
            # Get string def information format 1 as below
            #
            #     #string MY_STRING_1
            #     #language eng
            #     My first English string line 1
            #     My first English string line 2
            #     #string MY_STRING_1
            #     #language spa
            #     Mi segunda secuencia 1
            #     Mi segunda secuencia 2
            #
            if Line.find(u'#string ') >= 0 and Line.find(u'#language ') < 0 and \
                SecondLine.find(u'#string ') < 0 and SecondLine.find(u'#language ') >= 0 and \
                ThirdLine.find(u'#string ') < 0 and ThirdLine.find(u'#language ') < 0:
                Name = Line[Line.find(u'#string ') + len(u'#string ') : ].strip(' ')
                Language = SecondLine[SecondLine.find(u'#language ') + len(u'#language ') : ].strip(' ')
                for IndexJ in range(IndexI + 2, len(Lines)):
                    if Lines[IndexJ].find(u'#string ') < 0 and Lines[IndexJ].find(u'#language ') < 0:
                        Value = Value + Lines[IndexJ]
                    else:
                        IndexI = IndexJ
                        break
                # Value = Value.replace(u'\r\n', u'')
                Language = GetLanguageCode(Language, self.IsCompatibleMode, self.File)
                self.AddStringToList(Name, Language, Value)
                continue

            #
            # Get string def information format 2 as below
            #
            #    #string MY_STRING_1     #language eng     "My first English string line 1"
            #                                              "My first English string line 2"
            #                            #language spa     "Mi segunda secuencia 1"
            #                                              "Mi segunda secuencia 2"
            #    #string MY_STRING_2     #language eng     "My first English string line 1"
            #                                              "My first English string line 2"
            #    #string MY_STRING_2     #language spa     "Mi segunda secuencia 1"
            #                                              "Mi segunda secuencia 2"
            #
            if Line.find(u'#string ') >= 0 and Line.find(u'#language ') >= 0:
                StringItem = Line
                for IndexJ in range(IndexI + 1, len(Lines)):
                    if Lines[IndexJ].find(u'#string ') >= 0 and Lines[IndexJ].find(u'#language ') >= 0:
                        IndexI = IndexJ
                        break
                    elif Lines[IndexJ].find(u'#string ') < 0 and Lines[IndexJ].find(u'#language ') >= 0:
                        StringItem = StringItem + Lines[IndexJ]
                    elif Lines[IndexJ].count(u'\"') >= 2:
                        StringItem = StringItem[ : StringItem.rfind(u'\"')] + Lines[IndexJ][Lines[IndexJ].find(u'\"') + len(u'\"') : ]
                self.GetStringObject(StringItem)
                continue

    #
    # Load multiple .uni files
    #
    def LoadUniFiles(self, FileList = []):
        if len(FileList) > 0:
            if len(FileList) > 1:
                NewList = [];
                for File in FileList:
                    NewList.append(File)
                NewList.sort()
                for File in NewList:
                    self.LoadUniFile(File)
            else:
                for File in FileList:
                    self.LoadUniFile(File)

    #
    # Add a string to list
    #
    def AddStringToList(self, Name, Language, Value, Token = None, Referenced = False, UseOtherLangDef = '', Index = -1):
        if Language not in self.OrderedStringList:
            self.OrderedStringList[Language] = []

        IsAdded = False
        for Item in self.OrderedStringList[Language]:
            if Name == Item.StringName:
                IsAdded = True
                break
        if not IsAdded:
            Token = len(self.OrderedStringList[Language])
            if Index == -1:
                self.OrderedStringList[Language].append(StringDefClassObject(Name, Value, Referenced, Token, UseOtherLangDef))
            else:
                self.OrderedStringList[Language].insert(Index, StringDefClassObject(Name, Value, Referenced, Token, UseOtherLangDef))
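
    # Note: a string whose StringName already exists for the given language is silently
    # ignored, and the Token argument is always overridden with the current length of
    # that language's string list before the StringDefClassObject is created.
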
    #
    # Set the string as referenced
    #
    def SetStringReferenced(self, Name):
        for Lang in self.OrderedStringList:
            for Item in self.OrderedStringList[Lang]:
                if Name == Item.StringName:
                    Item.Referenced = True
                    break
    #
    # Search the string in language definition by Name
    #
    def FindStringValue(self, Name, Lang):
        for Item in self.OrderedStringList[Lang]:
            if Item.StringName == Name:
                return Item

        return None

    #
    # Search the string in language definition by Token
    #
    def FindByToken(self, Token, Lang):
        for Item in self.OrderedStringList[Lang]:
            if Item.Token == Token:
                return Item

        return None

    #
    # Re-order strings and re-generate tokens
    #
    def ReToken(self):
        #
        # Search each string to find if it is defined for each language
        # Use secondary language value to replace if missing in any one language
        #
        for IndexI in range(0, len(self.LanguageDef)):
            LangKey = self.LanguageDef[IndexI][0]
            for Item in self.OrderedStringList[LangKey]:
                Name = Item.StringName
                Value = Item.StringValue[0:-1]
                Referenced = Item.Referenced
                Index = self.OrderedStringList[LangKey].index(Item)
                for IndexJ in range(0, len(self.LanguageDef)):
                    LangFind = self.LanguageDef[IndexJ][0]
                    if self.FindStringValue(Name, LangFind) == None:
                        EdkLogger.debug(EdkLogger.DEBUG_5, Name)
                        Token = len(self.OrderedStringList[LangFind])
                        self.AddStringToList(Name, LangFind, Value, Token, Referenced, LangKey, Index)

        #
        # Retoken
        #
        # First re-token the first language
        LangName = self.LanguageDef[0][0]
        ReferencedStringList = []
        NotReferencedStringList = []
        Token = 0
        for Item in self.OrderedStringList[LangName]:
            if Item.Referenced == True:
                Item.Token = Token
                ReferencedStringList.append(Item)
                Token = Token + 1
            else:
                NotReferencedStringList.append(Item)
        self.OrderedStringList[LangName] = ReferencedStringList
        for Index in range(len(NotReferencedStringList)):
            NotReferencedStringList[Index].Token = Token + Index
            self.OrderedStringList[LangName].append(NotReferencedStringList[Index])

        #
        # Adjust the orders of other languages
        #
        for IndexOfLanguage in range(1, len(self.LanguageDef)):
            for OrderedString in self.OrderedStringList[LangName]:
                for UnOrderedString in self.OrderedStringList[self.LanguageDef[IndexOfLanguage][0]]:
                    if OrderedString.StringName == UnOrderedString.StringName:
                        UnOrderedString.Token = OrderedString.Token
                        break
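
    # Note: ReToken first makes every string name exist in every language (copying the
    # value from a language that defines it), then renumbers the first language so that
    # referenced strings get the lowest token values and unreferenced ones follow, and
    # finally copies those token values to the matching string names in all other
    # languages.
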
    #
    # Show the instance itself
    #
    def ShowMe(self):
        print self.LanguageDef
        #print self.OrderedStringList
        for Item in self.OrderedStringList:
            print Item
            for Member in self.OrderedStringList[Item]:
                print str(Member)

# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
if __name__ == '__main__':
    EdkLogger.Initialize()
    EdkLogger.SetLevel(EdkLogger.DEBUG_0)
    a = UniFileClassObject(['C:\\Edk\\Strings.uni', 'C:\\Edk\\Strings2.uni'])
    a.ReToken()
    a.ShowMe()
10
BaseTools/Source/Python/AutoGen/__init__.py
Normal file
@@ -0,0 +1,10 @@
# Copyright (c) 2007, Intel Corporation
# All rights reserved. This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

__all__ = ["AutoGen"]