This patch does the following:

1. Add a recovery mode for UPT failures
2. Add UNI file support
3. Add binary file header support
4. Add support for PCD error messages
5. Add Replace support
6. Format generated INF/DEC files
7. Update the dependency check
8. Other minor fixes


Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Hess Chen <hesheng.chen@intel.com>
Reviewed-by: Gao, Liming <liming.gao@intel.com>

git-svn-id: https://svn.code.sf.net/p/edk2/code/trunk/edk2@15896 6f19259b-4bc3-4df7-8a09-765794883524
Author:     Hess Chen
Date:       2014-08-26 05:58:02 +00:00
Committer:  hchen30
Commit:     421ccda307 (parent f0aa06e385)
56 changed files with 5945 additions and 1710 deletions

View File

@@ -1,7 +1,7 @@
## @file
# This file is used to define comment generating interface
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
@@ -24,8 +24,18 @@ from Library.DataType import TAB_SPACE_SPLIT
from Library.DataType import TAB_INF_GUIDTYPE_VAR
from Library.DataType import USAGE_ITEM_NOTIFY
from Library.DataType import ITEM_UNDEFINED
from Library.DataType import LANGUAGE_EN_US
from Library.DataType import TAB_HEADER_COMMENT
from Library.DataType import TAB_BINARY_HEADER_COMMENT
from Library.DataType import TAB_COMMENT_SPLIT
from Library.DataType import TAB_SPECIAL_COMMENT
from Library.DataType import END_OF_LINE
from Library.DataType import TAB_COMMENT_EDK1_SPLIT
from Library.DataType import TAB_COMMENT_EDK1_START
from Library.DataType import TAB_COMMENT_EDK1_END
from Library.DataType import TAB_STAR
from Library.DataType import TAB_PCD_PROMPT
from Library.UniClassObject import ConvertSpecialUnicodes
from Library.Misc import GetLocalValue
## GenTailCommentLines
#
# @param TailCommentLines: the tail comment lines that need to be generated
@@ -33,11 +43,11 @@ from Library.DataType import LANGUAGE_EN_US
# line tail comment
#
def GenTailCommentLines (TailCommentLines, LeadingSpaceNum = 0):
EndOfLine = "\n"
TailCommentLines = TailCommentLines.rstrip(EndOfLine)
CommentStr = " ## " + (EndOfLine + LeadingSpaceNum * TAB_SPACE_SPLIT + \
" ## ").join(GetSplitValueList(TailCommentLines, \
EndOfLine))
TailCommentLines = TailCommentLines.rstrip(END_OF_LINE)
CommentStr = TAB_SPACE_SPLIT*2 + TAB_SPECIAL_COMMENT + TAB_SPACE_SPLIT + \
(END_OF_LINE + LeadingSpaceNum * TAB_SPACE_SPLIT + TAB_SPACE_SPLIT*2 + TAB_SPECIAL_COMMENT + \
TAB_SPACE_SPLIT).join(GetSplitValueList(TailCommentLines, END_OF_LINE))
return CommentStr
## GenGenericComment
@@ -47,10 +57,9 @@ def GenTailCommentLines (TailCommentLines, LeadingSpaceNum = 0):
def GenGenericComment (CommentLines):
if not CommentLines:
return ''
EndOfLine = "\n"
CommentLines = CommentLines.rstrip(EndOfLine)
CommentStr = '## ' + (EndOfLine + '# ').join\
(GetSplitValueList(CommentLines, EndOfLine)) + EndOfLine
CommentLines = CommentLines.rstrip(END_OF_LINE)
CommentStr = TAB_SPECIAL_COMMENT + TAB_SPACE_SPLIT + (END_OF_LINE + TAB_COMMENT_SPLIT + TAB_SPACE_SPLIT).join\
(GetSplitValueList(CommentLines, END_OF_LINE)) + END_OF_LINE
return CommentStr
## GenGenericCommentF
@@ -61,23 +70,40 @@ def GenGenericComment (CommentLines):
# @param CommentLines: Generic comment Text, maybe Multiple Lines
# @return CommentStr: Generated comment line
#
def GenGenericCommentF (CommentLines, NumOfPound=1):
def GenGenericCommentF (CommentLines, NumOfPound=1, IsPrompt=False, IsInfLibraryClass=False):
if not CommentLines:
return ''
EndOfLine = "\n"
#
# if comment end with '\n', then remove it to prevent one extra line
# generate later on
#
if CommentLines.endswith(EndOfLine):
if CommentLines.endswith(END_OF_LINE):
CommentLines = CommentLines[:-1]
CommentLineList = GetSplitValueList(CommentLines, EndOfLine)
CommentStr = ''
for Line in CommentLineList:
if Line == '':
CommentStr += '#' * NumOfPound + '\n'
else:
CommentStr += '#' * NumOfPound + ' ' + Line + '\n'
if IsPrompt:
CommentStr += TAB_COMMENT_SPLIT * NumOfPound + TAB_SPACE_SPLIT + TAB_PCD_PROMPT + TAB_SPACE_SPLIT + \
CommentLines.replace(END_OF_LINE, '') + END_OF_LINE
else:
CommentLineList = GetSplitValueList(CommentLines, END_OF_LINE)
FindLibraryClass = False
for Line in CommentLineList:
# If this comment is for @libraryclass and it has multiple lines
# make sure the second lines align to the first line after @libraryclass as below
#
# ## @libraryclass XYZ FIRST_LINE
# ## ABC SECOND_LINE
#
if IsInfLibraryClass and Line.find(u'@libraryclass ') > -1:
FindLibraryClass = True
if Line == '':
CommentStr += TAB_COMMENT_SPLIT * NumOfPound + END_OF_LINE
else:
if FindLibraryClass and Line.find(u'@libraryclass ') > -1:
CommentStr += TAB_COMMENT_SPLIT * NumOfPound + TAB_SPACE_SPLIT + Line + END_OF_LINE
elif FindLibraryClass:
CommentStr += TAB_COMMENT_SPLIT * NumOfPound + TAB_SPACE_SPLIT * 16 + Line + END_OF_LINE
else:
CommentStr += TAB_COMMENT_SPLIT * NumOfPound + TAB_SPACE_SPLIT + Line + END_OF_LINE
return CommentStr
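A minimal usage sketch (Python 2) for the reworked GenGenericCommentF above; the import path is assumed from the UPT Library layout and the help text is made up.

# Sketch only: import path assumed, strings are placeholders.
from Library.CommentGenerating import GenGenericCommentF

# A @Prompt comment collapses to a single '# @Prompt <text>' line.
print GenGenericCommentF(u'Debug print level\n', IsPrompt=True)

# Library-class help keeps follow-on lines padded with 16 spaces so they
# line up under the text that follows '@libraryclass'.
Help = u'@libraryclass  Provides debug print services\nfor all boot phases\n'
print GenGenericCommentF(Help, IsInfLibraryClass=True)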
@@ -91,40 +117,57 @@ def GenGenericCommentF (CommentLines, NumOfPound=1):
# @param Copyright possible multiple copyright lines
# @param License possible multiple license lines
#
def GenHeaderCommentSection(Abstract, Description, Copyright, License):
EndOfLine = '\n'
def GenHeaderCommentSection(Abstract, Description, Copyright, License, IsBinaryHeader=False, \
CommChar=TAB_COMMENT_SPLIT):
Content = ''
Content += '## @file' + EndOfLine
if Abstract:
Abstract = Abstract.rstrip(EndOfLine)
Content += '# ' + Abstract + EndOfLine
Content += '#' + EndOfLine
#
# Convert special character to (c), (r) and (tm).
#
if isinstance(Abstract, unicode):
Abstract = ConvertSpecialUnicodes(Abstract)
if isinstance(Description, unicode):
Description = ConvertSpecialUnicodes(Description)
if IsBinaryHeader:
Content += CommChar * 2 + TAB_SPACE_SPLIT + TAB_BINARY_HEADER_COMMENT + END_OF_LINE
elif CommChar == TAB_COMMENT_EDK1_SPLIT:
Content += CommChar + TAB_SPACE_SPLIT + TAB_COMMENT_EDK1_START + TAB_STAR + TAB_SPACE_SPLIT +\
TAB_HEADER_COMMENT + END_OF_LINE
else:
Content += '#' + EndOfLine
Content += CommChar * 2 + TAB_SPACE_SPLIT + TAB_HEADER_COMMENT + END_OF_LINE
if Abstract:
Abstract = Abstract.rstrip(END_OF_LINE)
Content += CommChar + TAB_SPACE_SPLIT + (END_OF_LINE + CommChar + TAB_SPACE_SPLIT).join(GetSplitValueList\
(Abstract, END_OF_LINE))
Content += END_OF_LINE + CommChar + END_OF_LINE
else:
Content += CommChar + END_OF_LINE
if Description:
Description = Description.rstrip(EndOfLine)
Content += '# ' + (EndOfLine + '# ').join(GetSplitValueList\
(Description, '\n'))
Content += EndOfLine + '#' + EndOfLine
Description = Description.rstrip(END_OF_LINE)
Content += CommChar + TAB_SPACE_SPLIT + (END_OF_LINE + CommChar + TAB_SPACE_SPLIT).join(GetSplitValueList\
(Description, END_OF_LINE))
Content += END_OF_LINE + CommChar + END_OF_LINE
#
# There is no '#\n' line to separate multiple copyright lines in code base
#
if Copyright:
Copyright = Copyright.rstrip(EndOfLine)
Content += '# ' + (EndOfLine + '# ').join\
(GetSplitValueList(Copyright, '\n'))
Content += EndOfLine + '#' + EndOfLine
Copyright = Copyright.rstrip(END_OF_LINE)
Content += CommChar + TAB_SPACE_SPLIT + (END_OF_LINE + CommChar + TAB_SPACE_SPLIT).join\
(GetSplitValueList(Copyright, END_OF_LINE))
Content += END_OF_LINE + CommChar + END_OF_LINE
if License:
License = License.rstrip(EndOfLine)
Content += '# ' + (EndOfLine + '# ').join(GetSplitValueList\
(License, '\n'))
Content += EndOfLine + '#' + EndOfLine
Content += '##' + EndOfLine
License = License.rstrip(END_OF_LINE)
Content += CommChar + TAB_SPACE_SPLIT + (END_OF_LINE + CommChar + TAB_SPACE_SPLIT).join(GetSplitValueList\
(License, END_OF_LINE))
Content += END_OF_LINE + CommChar + END_OF_LINE
if CommChar == TAB_COMMENT_EDK1_SPLIT:
Content += CommChar + TAB_SPACE_SPLIT + TAB_STAR + TAB_COMMENT_EDK1_END + END_OF_LINE
else:
Content += CommChar * 2 + END_OF_LINE
return Content
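A hedged sketch of calling the extended GenHeaderCommentSection for a binary header; the module path and field values are illustrative only.

# Sketch only: import path assumed, strings are placeholders.
from Library.CommentGenerating import GenHeaderCommentSection

Header = GenHeaderCommentSection(
    u'Sample package',                                          # Abstract
    u'Placeholder description for the generated header.',       # Description
    u'Copyright (c) 2014, Example Corp. All rights reserved.',  # Copyright
    u'BSD License',                                             # License
    IsBinaryHeader=True)
print Header
# The block starts with '## @BinaryHeader' instead of '## @file'; each part is
# emitted as '# <text>' lines separated by bare '#' lines and closed with '##'.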
@@ -197,21 +240,7 @@ def GenDecTailComment (SupModuleList):
# @return HelpStr: the help text string found, '' means no help text found
#
def _GetHelpStr(HelpTextObjList):
HelpStr = ''
ValueList = []
for HelpObj in HelpTextObjList:
if HelpObj and HelpObj.GetLang() == LANGUAGE_EN_US:
HelpStr = HelpObj.GetString()
return HelpStr
for HelpObj in HelpTextObjList:
if HelpObj and HelpObj.GetLang().startswith('en'):
HelpStr = HelpObj.GetString()
return HelpStr
for HelpObj in HelpTextObjList:
if HelpObj and not HelpObj.GetLang():
HelpStr = HelpObj.GetString()
return HelpStr
return HelpStr
ValueList.append((HelpObj.GetLang(), HelpObj.GetString()))
return GetLocalValue(ValueList, True)

View File

@@ -1,7 +1,7 @@
## @file
# This file is used to define comment parsing interface
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
@@ -32,6 +32,17 @@ from Library.DataType import HEADER_COMMENT_DESCRIPTION
from Library.DataType import TAB_SPACE_SPLIT
from Library.DataType import TAB_COMMA_SPLIT
from Library.DataType import SUP_MODULE_LIST
from Library.DataType import TAB_VALUE_SPLIT
from Library.DataType import TAB_PCD_VALIDRANGE
from Library.DataType import TAB_PCD_VALIDLIST
from Library.DataType import TAB_PCD_EXPRESSION
from Library.DataType import TAB_PCD_PROMPT
from Library.DataType import TAB_CAPHEX_START
from Library.DataType import TAB_HEX_START
from Library.DataType import PCD_ERR_CODE_MAX_SIZE
from Library.ExpressionValidate import IsValidRangeExpr
from Library.ExpressionValidate import IsValidListExpr
from Library.ExpressionValidate import IsValidLogicalExpr
from Object.POM.CommonObject import TextObject
from Object.POM.CommonObject import PcdErrorObject
import Logger.Log as Logger
@@ -47,13 +58,16 @@ from Logger import StringTable as ST
# @param CommentList: List of (Comment, LineNumber)
# @param FileName: FileName of the comment
#
def ParseHeaderCommentSection(CommentList, FileName = None):
def ParseHeaderCommentSection(CommentList, FileName = None, IsBinaryHeader = False):
Abstract = ''
Description = ''
Copyright = ''
License = ''
EndOfLine = "\n"
STR_HEADER_COMMENT_START = "@file"
if IsBinaryHeader:
STR_HEADER_COMMENT_START = "@BinaryHeader"
else:
STR_HEADER_COMMENT_START = "@file"
HeaderCommentStage = HEADER_COMMENT_NOT_STARTED
#
@@ -94,7 +108,6 @@ def ParseHeaderCommentSection(CommentList, FileName = None):
# in case there is no abstract and description
#
if not Comment:
Abstract = ''
HeaderCommentStage = HEADER_COMMENT_DESCRIPTION
elif _IsCopyrightLine(Comment):
Result, ErrMsg = _ValidateCopyright(Comment)
@@ -134,14 +147,7 @@ def ParseHeaderCommentSection(CommentList, FileName = None):
if not Comment and not License:
continue
License += Comment + EndOfLine
if not Copyright:
Logger.Error("\nUPT", FORMAT_INVALID, ST.ERR_COPYRIGHT_MISSING, \
FileName)
if not License:
Logger.Error("\nUPT", FORMAT_INVALID, ST.ERR_LICENSE_MISSING, FileName)
return Abstract.strip(), Description.strip(), Copyright.strip(), License.strip()
## _IsCopyrightLine
@@ -158,7 +164,7 @@ def _IsCopyrightLine (LineContent):
ReIsCopyrightRe = re.compile(r"""(^|\s)COPYRIGHT *\(""", re.DOTALL)
if ReIsCopyrightRe.search(LineContent):
Result = True
return Result
## ParseGenericComment
@@ -188,6 +194,37 @@ def ParseGenericComment (GenericComment, ContainerFile=None, SkipTag=None):
return HelpTxt
## ParsePcdErrorCode
#
# @param Value: original ErrorCode value
# @param ContainerFile: Input value for filename of Dec file
# @param LineNum: Line Num
#
def ParsePcdErrorCode (Value = None, ContainerFile = None, LineNum = None):
try:
if Value.strip().startswith((TAB_HEX_START, TAB_CAPHEX_START)):
Base = 16
else:
Base = 10
ErrorCode = long(Value, Base)
if ErrorCode > PCD_ERR_CODE_MAX_SIZE or ErrorCode < 0:
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
"The format %s of ErrorCode is not valid, should be UNIT32 type or long type" % Value,
File = ContainerFile,
Line = LineNum)
#
# To delete the tailing 'L'
#
return hex(ErrorCode)[:-1]
except ValueError, XStr:
if XStr:
pass
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
"The format %s of ErrorCode is not valid, should be UNIT32 type or long type" % Value,
File = ContainerFile,
Line = LineNum)
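A small sketch (Python 2) of what the new ParsePcdErrorCode is expected to return; the import path is assumed.

# Sketch only: import path assumed.
from Library.CommentParsing import ParsePcdErrorCode

print ParsePcdErrorCode('0x80000001', 'Example.dec', 25)   # expected: '0x80000001'
print ParsePcdErrorCode('3', 'Example.dec', 26)            # expected: '0x3'
# Values outside 0..PCD_ERR_CODE_MAX_SIZE (0xFFFFFFFF) and non-numeric strings
# are reported through Logger.Error rather than returned.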
## ParseDecPcdGenericComment
#
@@ -195,46 +232,135 @@ def ParseGenericComment (GenericComment, ContainerFile=None, SkipTag=None):
# LineNum)
# @param ContainerFile: Input value for filename of Dec file
#
def ParseDecPcdGenericComment (GenericComment, ContainerFile):
def ParseDecPcdGenericComment (GenericComment, ContainerFile, TokenSpaceGuidCName, CName, MacroReplaceDict):
HelpStr = ''
PromptStr = ''
PcdErr = None
PcdErrList = []
ValidValueNum = 0
ValidRangeNum = 0
ExpressionNum = 0
for (CommentLine, LineNum) in GenericComment:
Comment = CleanString2(CommentLine)[1]
if Comment.startswith("@ValidRange"):
if PcdErr:
#
# To replace Macro
#
MACRO_PATTERN = '[\t\s]*\$\([A-Z][_A-Z0-9]*\)'
MatchedStrs = re.findall(MACRO_PATTERN, Comment)
for MatchedStr in MatchedStrs:
if MatchedStr:
Macro = MatchedStr.strip().lstrip('$(').rstrip(')').strip()
if Macro in MacroReplaceDict:
Comment = Comment.replace(MatchedStr, MacroReplaceDict[Macro])
if Comment.startswith(TAB_PCD_VALIDRANGE):
if ValidValueNum > 0 or ExpressionNum > 0:
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
ST.WRN_MULTI_PCD_RANGES,
File = ContainerFile,
Line = LineNum)
ValidRange = Comment.replace("@ValidRange", "", 1)
if _CheckRangeExpression(ValidRange):
else:
PcdErr = PcdErrorObject()
PcdErr.SetValidValueRange(ValidRange)
elif Comment.startswith("@ValidList"):
if PcdErr:
PcdErr.SetTokenSpaceGuidCName(TokenSpaceGuidCName)
PcdErr.SetCName(CName)
PcdErr.SetFileLine(Comment)
PcdErr.SetLineNum(LineNum)
ValidRangeNum += 1
ValidRange = Comment.replace(TAB_PCD_VALIDRANGE, "", 1).strip()
Valid, Cause = _CheckRangeExpression(ValidRange)
if Valid:
ValueList = ValidRange.split(TAB_VALUE_SPLIT)
if len(ValueList) > 1:
PcdErr.SetValidValueRange((TAB_VALUE_SPLIT.join(ValueList[1:])).strip())
PcdErr.SetErrorNumber(ParsePcdErrorCode(ValueList[0], ContainerFile, LineNum))
else:
PcdErr.SetValidValueRange(ValidRange)
PcdErrList.append(PcdErr)
else:
Logger.Error("Parser",
FORMAT_NOT_SUPPORTED,
Cause,
ContainerFile,
LineNum)
elif Comment.startswith(TAB_PCD_VALIDLIST):
if ValidRangeNum > 0 or ExpressionNum > 0:
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
ST.WRN_MULTI_PCD_RANGES,
File = ContainerFile,
Line = LineNum)
ValidValue = Comment.replace("@ValidList", "", 1).replace(TAB_COMMA_SPLIT, TAB_SPACE_SPLIT)
PcdErr = PcdErrorObject()
PcdErr.SetValidValue(ValidValue)
elif Comment.startswith("@Expression"):
if PcdErr:
elif ValidValueNum > 0:
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
ST.WRN_MULTI_PCD_RANGES,
ST.WRN_MULTI_PCD_VALIDVALUE,
File = ContainerFile,
Line = LineNum)
Expression = Comment.replace("@Expression", "", 1)
if _CheckRangeExpression(Expression):
else:
PcdErr = PcdErrorObject()
PcdErr.SetExpression(Expression)
PcdErr.SetTokenSpaceGuidCName(TokenSpaceGuidCName)
PcdErr.SetCName(CName)
PcdErr.SetFileLine(Comment)
PcdErr.SetLineNum(LineNum)
ValidValueNum += 1
ValidValueExpr = Comment.replace(TAB_PCD_VALIDLIST, "", 1).strip()
Valid, Cause = _CheckListExpression(ValidValueExpr)
if Valid:
ValidValue = Comment.replace(TAB_PCD_VALIDLIST, "", 1).replace(TAB_COMMA_SPLIT, TAB_SPACE_SPLIT)
ValueList = ValidValue.split(TAB_VALUE_SPLIT)
if len(ValueList) > 1:
PcdErr.SetValidValue((TAB_VALUE_SPLIT.join(ValueList[1:])).strip())
PcdErr.SetErrorNumber(ParsePcdErrorCode(ValueList[0], ContainerFile, LineNum))
else:
PcdErr.SetValidValue(ValidValue)
PcdErrList.append(PcdErr)
else:
Logger.Error("Parser",
FORMAT_NOT_SUPPORTED,
Cause,
ContainerFile,
LineNum)
elif Comment.startswith(TAB_PCD_EXPRESSION):
if ValidRangeNum > 0 or ValidValueNum > 0:
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
ST.WRN_MULTI_PCD_RANGES,
File = ContainerFile,
Line = LineNum)
else:
PcdErr = PcdErrorObject()
PcdErr.SetTokenSpaceGuidCName(TokenSpaceGuidCName)
PcdErr.SetCName(CName)
PcdErr.SetFileLine(Comment)
PcdErr.SetLineNum(LineNum)
ExpressionNum += 1
Expression = Comment.replace(TAB_PCD_EXPRESSION, "", 1).strip()
Valid, Cause = _CheckExpression(Expression)
if Valid:
ValueList = Expression.split(TAB_VALUE_SPLIT)
if len(ValueList) > 1:
PcdErr.SetExpression((TAB_VALUE_SPLIT.join(ValueList[1:])).strip())
PcdErr.SetErrorNumber(ParsePcdErrorCode(ValueList[0], ContainerFile, LineNum))
else:
PcdErr.SetExpression(Expression)
PcdErrList.append(PcdErr)
else:
Logger.Error("Parser",
FORMAT_NOT_SUPPORTED,
Cause,
ContainerFile,
LineNum)
elif Comment.startswith(TAB_PCD_PROMPT):
if PromptStr:
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
ST.WRN_MULTI_PCD_PROMPT,
File = ContainerFile,
Line = LineNum)
PromptStr = Comment.replace(TAB_PCD_PROMPT, "", 1).strip()
else:
HelpStr += Comment + '\n'
if Comment:
HelpStr += Comment + '\n'
#
# remove the last EOL if the comment is of format 'FOO\n'
@@ -243,7 +369,7 @@ def ParseDecPcdGenericComment (GenericComment, ContainerFile):
if HelpStr != '\n' and not HelpStr.endswith('\n\n'):
HelpStr = HelpStr[:-1]
return HelpStr, PcdErr
return HelpStr, PcdErrList, PromptStr
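For reference, a hedged example of the DEC comment block this parser now handles; the PCD name and numbers are made up, while the @Prompt/@ValidRange/@ValidList/@Expression tags and the '<ErrorCode> | <expression>' form come from the code above.

[PcdsFixedAtBuild]
  ## Free-form help text is collected into HelpStr.
  ## @Prompt Debug print level
  ## @ValidRange 0x80000001 | 0x0 - 0x1F
  gExampleTokenSpaceGuid.PcdExampleDebugLevel|0x0|UINT32|0x00000001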
## ParseDecPcdTailComment
#
@@ -289,18 +415,43 @@ def ParseDecPcdTailComment (TailCommentList, ContainerFile):
return SupModuleList, HelpStr
## _CheckListExpression
#
# @param Expression: Pcd value list expression
#
def _CheckListExpression(Expression):
ListExpr = ''
if TAB_VALUE_SPLIT in Expression:
ListExpr = Expression[Expression.find(TAB_VALUE_SPLIT)+1:]
else:
ListExpr = Expression
return IsValidListExpr(ListExpr)
## _CheckExpreesion
#
# @param Expression: Pcd value expression
#
def _CheckExpression(Expression):
Expr = ''
if TAB_VALUE_SPLIT in Expression:
Expr = Expression[Expression.find(TAB_VALUE_SPLIT)+1:]
else:
Expr = Expression
return IsValidLogicalExpr(Expr, True)
## _CheckRangeExpression
#
# @param Expression: Pcd range expression
#
def _CheckRangeExpression(Expression):
#
# check grammar for Pcd range expression is not required yet
#
if Expression:
pass
return True
RangeExpr = ''
if TAB_VALUE_SPLIT in Expression:
RangeExpr = Expression[Expression.find(TAB_VALUE_SPLIT)+1:]
else:
RangeExpr = Expression
return IsValidRangeExpr(RangeExpr)
## ValidateCopyright
#
@@ -349,7 +500,6 @@ def ParseComment (Comment, UsageTokens, TypeTokens, RemoveTokens, ParseVariable)
Usage = None
Type = None
String = None
HelpText = None
Comment = Comment[0]

View File

@@ -1,7 +1,7 @@
## @file
# This file is used to define class for data type structure
#
# Copyright (c) 2011 - 2013, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
@@ -47,7 +47,10 @@ USAGE_LIST = ["CONSUMES",
"PRODUCES",
"SOMETIMES_PRODUCES"]
LANGUAGE_EN_US = 'en-US'
TAB_LANGUAGE_EN_US = 'en-US'
TAB_LANGUAGE_ENG = 'eng'
TAB_LANGUAGE_EN = 'en'
TAB_LANGUAGE_EN_X = 'en-x-tianocore'
USAGE_ITEM_PRODUCES = 'PRODUCES'
USAGE_ITEM_SOMETIMES_PRODUCES = 'SOMETIMES_PRODUCES'
@@ -80,6 +83,14 @@ USAGE_SOMETIMES_CONSUMES_LIST = [USAGE_ITEM_SOMETIMES_CONSUMES,
ITEM_UNDEFINED = 'UNDEFINED'
TAB_PCD_VALIDRANGE = '@ValidRange'
TAB_PCD_VALIDLIST = '@ValidList'
TAB_PCD_EXPRESSION = '@Expression'
TAB_PCD_PROMPT = '@Prompt'
TAB_STR_TOKENCNAME = 'STR'
TAB_STR_TOKENPROMPT = 'PROMPT'
TAB_STR_TOKENHELP = 'HELP'
TAB_STR_TOKENERR = 'ERR'
#
# Dictionary of usage tokens and their synonmys
@@ -269,11 +280,12 @@ PCD_USAGE_TYPE_LIST_OF_UPT = PCD_USAGE_TYPE_LIST_OF_MODULE
##
# Binary File Type List
#
BINARY_FILE_TYPE_LIST = ["GUID", "PE32", "PIC", "TE", "DXE_DEPEX", "VER", "UI", "COMPAT16", "FV", "BIN", "RAW",
BINARY_FILE_TYPE_LIST = ["PE32", "PIC", "TE", "DXE_DEPEX", "VER", "UI", "COMPAT16", "FV", "BIN", "RAW",
"ACPI", "ASL",
"PEI_DEPEX",
"SMM_DEPEX",
"SUBTYPE_GUID"
"SUBTYPE_GUID",
"DISPOSABLE"
]
BINARY_FILE_TYPE_LIST_IN_UDP = \
["GUID", "FREEFORM",
@@ -285,6 +297,7 @@ BINARY_FILE_TYPE_LIST_IN_UDP = \
"BIN", "VER", "UI"
]
SUBTYPE_GUID_BINARY_FILE_TYPE = "FREEFORM"
##
# Possible values for COMPONENT_TYPE, and their descriptions, are listed in
# the table,
@@ -328,6 +341,7 @@ TAB_EQUAL_SPLIT = '='
TAB_DEQUAL_SPLIT = '=='
TAB_VALUE_SPLIT = '|'
TAB_COMMA_SPLIT = ','
TAB_HORIZON_LINE_SPLIT = '-'
TAB_SPACE_SPLIT = ' '
TAB_UNDERLINE_SPLIT = '_'
TAB_SEMI_COLON_SPLIT = ';'
@@ -341,7 +355,13 @@ TAB_BACK_SLASH = '/'
TAB_SPECIAL_COMMENT = '##'
TAB_HEADER_COMMENT = '@file'
TAB_BINARY_HEADER_COMMENT = '@BinaryHeader'
TAB_STAR = "*"
TAB_STAR = '*'
TAB_ENCODING_UTF16LE = 'utf_16_le'
TAB_CAPHEX_START = '0X'
TAB_HEX_START = '0x'
TAB_PCD_ERROR = 'Error'
TAB_PCD_ERROR_SECTION_COMMENT = 'Error message section'
TAB_UNI_FILE_SUFFIXS = ['.uni', '.UNI', '.Uni']
TAB_EDK_SOURCE = '$(EDK_SOURCE)'
TAB_EFI_SOURCE = '$(EFI_SOURCE)'
@@ -354,10 +374,9 @@ TAB_ARCH_X64 = 'X64'
TAB_ARCH_IPF = 'IPF'
TAB_ARCH_ARM = 'ARM'
TAB_ARCH_EBC = 'EBC'
TAB_ARCH_AARCH64 = 'AARCH64'
ARCH_LIST = \
[TAB_ARCH_IA32, TAB_ARCH_X64, TAB_ARCH_IPF, TAB_ARCH_ARM, TAB_ARCH_EBC, TAB_ARCH_AARCH64]
[TAB_ARCH_IA32, TAB_ARCH_X64, TAB_ARCH_IPF, TAB_ARCH_ARM, TAB_ARCH_EBC]
SUP_MODULE_BASE = 'BASE'
SUP_MODULE_SEC = 'SEC'
@@ -442,7 +461,6 @@ TAB_SOURCES_X64 = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_X64
TAB_SOURCES_IPF = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_IPF
TAB_SOURCES_ARM = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_ARM
TAB_SOURCES_EBC = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_EBC
TAB_SOURCES_AARCH64 = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_BINARIES = 'Binaries'
TAB_BINARIES_COMMON = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_COMMON
@@ -451,7 +469,6 @@ TAB_BINARIES_X64 = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_X64
TAB_BINARIES_IPF = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_IPF
TAB_BINARIES_ARM = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_ARM
TAB_BINARIES_EBC = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_EBC
TAB_BINARIES_AARCH64 = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_INCLUDES = 'Includes'
TAB_INCLUDES_COMMON = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_COMMON
@@ -460,7 +477,6 @@ TAB_INCLUDES_X64 = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_X64
TAB_INCLUDES_IPF = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_IPF
TAB_INCLUDES_ARM = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_ARM
TAB_INCLUDES_EBC = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_EBC
TAB_INCLUDES_AARCH64 = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_GUIDS = 'Guids'
TAB_GUIDS_COMMON = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_COMMON
@@ -469,7 +485,6 @@ TAB_GUIDS_X64 = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_X64
TAB_GUIDS_IPF = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_IPF
TAB_GUIDS_ARM = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_ARM
TAB_GUIDS_EBC = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_EBC
TAB_GUIDS_AARCH64 = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_PROTOCOLS = 'Protocols'
TAB_PROTOCOLS_COMMON = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_COMMON
@@ -478,7 +493,6 @@ TAB_PROTOCOLS_X64 = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_X64
TAB_PROTOCOLS_IPF = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_IPF
TAB_PROTOCOLS_ARM = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_ARM
TAB_PROTOCOLS_EBC = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_EBC
TAB_PROTOCOLS_AARCH64 = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_PPIS = 'Ppis'
TAB_PPIS_COMMON = TAB_PPIS + TAB_SPLIT + TAB_ARCH_COMMON
@@ -487,7 +501,6 @@ TAB_PPIS_X64 = TAB_PPIS + TAB_SPLIT + TAB_ARCH_X64
TAB_PPIS_IPF = TAB_PPIS + TAB_SPLIT + TAB_ARCH_IPF
TAB_PPIS_ARM = TAB_PPIS + TAB_SPLIT + TAB_ARCH_ARM
TAB_PPIS_EBC = TAB_PPIS + TAB_SPLIT + TAB_ARCH_EBC
TAB_PPIS_AARCH64 = TAB_PPIS + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_LIBRARY_CLASSES = 'LibraryClasses'
TAB_LIBRARY_CLASSES_COMMON = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_COMMON
@@ -496,7 +509,6 @@ TAB_LIBRARY_CLASSES_X64 = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_X64
TAB_LIBRARY_CLASSES_IPF = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_IPF
TAB_LIBRARY_CLASSES_ARM = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_ARM
TAB_LIBRARY_CLASSES_EBC = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_EBC
TAB_LIBRARY_CLASSES_AARCH64 = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_PACKAGES = 'Packages'
TAB_PACKAGES_COMMON = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_COMMON
@@ -505,7 +517,6 @@ TAB_PACKAGES_X64 = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_X64
TAB_PACKAGES_IPF = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_IPF
TAB_PACKAGES_ARM = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_ARM
TAB_PACKAGES_EBC = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_EBC
TAB_PACKAGES_AARCH64 = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_PCDS = 'Pcds'
TAB_PCDS_FIXED_AT_BUILD = 'FixedAtBuild'
@@ -545,8 +556,6 @@ TAB_PCDS_FIXED_AT_BUILD_ARM = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_ARM
TAB_PCDS_FIXED_AT_BUILD_EBC = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_EBC
TAB_PCDS_FIXED_AT_BUILD_AARCH64 = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + \
TAB_SPLIT + TAB_ARCH_AARCH64
TAB_PCDS_PATCHABLE_IN_MODULE_NULL = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE
TAB_PCDS_PATCHABLE_IN_MODULE_COMMON = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE \
@@ -561,8 +570,6 @@ TAB_PCDS_PATCHABLE_IN_MODULE_ARM = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
TAB_SPLIT + TAB_ARCH_ARM
TAB_PCDS_PATCHABLE_IN_MODULE_EBC = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
TAB_SPLIT + TAB_ARCH_EBC
TAB_PCDS_PATCHABLE_IN_MODULE_AARCH64 = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + \
TAB_SPLIT + TAB_ARCH_AARCH64
TAB_PCDS_FEATURE_FLAG_NULL = TAB_PCDS + TAB_PCDS_FEATURE_FLAG
TAB_PCDS_FEATURE_FLAG_COMMON = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT \
@@ -577,8 +584,6 @@ TAB_PCDS_FEATURE_FLAG_ARM = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
TAB_ARCH_ARM
TAB_PCDS_FEATURE_FLAG_EBC = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
TAB_ARCH_EBC
TAB_PCDS_FEATURE_FLAG_AARCH64 = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + \
TAB_ARCH_AARCH64
TAB_PCDS_DYNAMIC_EX_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX
TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_DEFAULT
@@ -596,8 +601,6 @@ TAB_PCDS_DYNAMIC_EX_ARM = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_ARM
TAB_PCDS_DYNAMIC_EX_EBC = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_EBC
TAB_PCDS_DYNAMIC_EX_AARCH64 = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + \
TAB_ARCH_AARCH64
TAB_PCDS_DYNAMIC_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC
TAB_PCDS_DYNAMIC_DEFAULT_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_DEFAULT
@@ -610,7 +613,6 @@ TAB_PCDS_DYNAMIC_X64 = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_X64
TAB_PCDS_DYNAMIC_IPF = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_IPF
TAB_PCDS_DYNAMIC_ARM = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_ARM
TAB_PCDS_DYNAMIC_EBC = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_EBC
TAB_PCDS_DYNAMIC_AARCH64 = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_PCD_DYNAMIC_TYPE_LIST = [TAB_PCDS_DYNAMIC_DEFAULT_NULL, \
TAB_PCDS_DYNAMIC_VPD_NULL, \
@@ -651,7 +653,6 @@ TAB_DEPEX_X64 = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_X64
TAB_DEPEX_IPF = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_IPF
TAB_DEPEX_ARM = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_ARM
TAB_DEPEX_EBC = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_EBC
TAB_DEPEX_AARCH64 = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_SKUIDS = 'SkuIds'
@@ -662,7 +663,6 @@ TAB_LIBRARIES_X64 = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_X64
TAB_LIBRARIES_IPF = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_IPF
TAB_LIBRARIES_ARM = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_ARM
TAB_LIBRARIES_EBC = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_EBC
TAB_LIBRARIES_AARCH64 = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_COMPONENTS = 'Components'
TAB_COMPONENTS_COMMON = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_COMMON
@@ -671,7 +671,6 @@ TAB_COMPONENTS_X64 = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_X64
TAB_COMPONENTS_IPF = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_IPF
TAB_COMPONENTS_ARM = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_ARM
TAB_COMPONENTS_EBC = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_EBC
TAB_COMPONENTS_AARCH64 = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_COMPONENTS_SOURCE_OVERRIDE_PATH = 'SOURCE_OVERRIDE_PATH'
@@ -699,6 +698,7 @@ TAB_INF_DEFINES_EFI_SPECIFICATION_VERSION = 'EFI_SPECIFICATION_VERSION'
TAB_INF_DEFINES_UEFI_SPECIFICATION_VERSION = 'UEFI_SPECIFICATION_VERSION'
TAB_INF_DEFINES_PI_SPECIFICATION_VERSION = 'PI_SPECIFICATION_VERSION'
TAB_INF_DEFINES_EDK_RELEASE_VERSION = 'EDK_RELEASE_VERSION'
TAB_INF_DEFINES_MODULE_UNI_FILE = 'MODULE_UNI_FILE'
TAB_INF_DEFINES_BINARY_MODULE = 'BINARY_MODULE'
TAB_INF_DEFINES_LIBRARY_CLASS = 'LIBRARY_CLASS'
TAB_INF_DEFINES_COMPONENT_TYPE = 'COMPONENT_TYPE'
@@ -750,7 +750,7 @@ TAB_DEC_DEFINES_DEC_SPECIFICATION = 'DEC_SPECIFICATION'
TAB_DEC_DEFINES_PACKAGE_NAME = 'PACKAGE_NAME'
TAB_DEC_DEFINES_PACKAGE_GUID = 'PACKAGE_GUID'
TAB_DEC_DEFINES_PACKAGE_VERSION = 'PACKAGE_VERSION'
TAB_DEC_DEFINES_PKG_UNI_FILE = 'PKG_UNI_FILE'
TAB_DEC_DEFINES_PKG_UNI_FILE = 'PACKAGE_UNI_FILE'
TAB_DEC_PACKAGE_ABSTRACT = 'STR_PACKAGE_ABSTRACT'
TAB_DEC_PACKAGE_DESCRIPTION = 'STR_PACKAGE_DESCRIPTION'
TAB_DEC_PACKAGE_LICENSE = 'STR_PACKAGE_LICENSE'
@@ -827,6 +827,7 @@ TAB_HEADER_COPYRIGHT = 'Copyright'
TAB_HEADER_LICENSE = 'License'
TAB_BINARY_HEADER_IDENTIFIER = 'BinaryHeader'
TAB_BINARY_HEADER_USERID = 'TianoCore'
#
# Build database path
#
@@ -951,3 +952,5 @@ TOOL_FAMILY_LIST = ["MSFT",
TYPE_HOB_SECTION = 'HOB'
TYPE_EVENT_SECTION = 'EVENT'
TYPE_BOOTMODE_SECTION = 'BOOTMODE'
PCD_ERR_CODE_MAX_SIZE = 4294967295

View File

@@ -1,7 +1,7 @@
## @file
# This file is used to check PCD logical expression
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
@@ -106,16 +106,21 @@ class _ExprBase:
'>' : '=',
'<' : '='
}
for Operator in OpList:
if not self.Token[self.Index:].startswith(Operator):
continue
self.Index += len(Operator)
Char = self.Token[self.Index : self.Index + 1]
if (Operator in LetterOp and (Char == '_' or Char.isalnum())) \
or (Operator in OpMap and OpMap[Operator] == Char):
self.Index -= len(Operator)
break
return True
return False
## _LogicalExpressionParser
@@ -166,6 +171,7 @@ class _LogicalExpressionParser(_ExprBase):
return False
return True
return False
def IsAtomicNumVal(self):
@@ -216,32 +222,32 @@ class _LogicalExpressionParser(_ExprBase):
#
def LogicalExpression(self):
Ret = self.SpecNot()
while self.IsCurrentOp(['||', 'OR', 'or', '&&', 'AND', 'and', 'XOR']):
while self.IsCurrentOp(['||', 'OR', 'or', '&&', 'AND', 'and', 'XOR', 'xor', '^']):
if self.Token[self.Index-1] == '|' and self.Parens <= 0:
raise _ExprError(ST.ERR_EXPR_OR)
if Ret == self.ARITH:
raise _ExprError(ST.ERR_EXPR_OR % self.Token)
if Ret not in [self.ARITH, self.LOGICAL, self.REALLOGICAL, self.STRINGITEM]:
raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
Ret = self.SpecNot()
if Ret == self.ARITH:
if Ret not in [self.ARITH, self.LOGICAL, self.REALLOGICAL, self.STRINGITEM]:
raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
Ret = self.REALLOGICAL
return Ret
def SpecNot(self):
if self.IsCurrentOp(["NOT", "!"]):
if self.IsCurrentOp(["NOT", "!", "not"]):
return self.SpecNot()
return self.Rel()
## A < B, A > B, A <= B, A >= b
## A < B, A > B, A <= B, A >= B
#
def Rel(self):
Ret = self.Expr()
if self.IsCurrentOp(["<=", ">=", ">", "<", "GT", "LT", "GE", "LE",
"==", "EQ", "!=", "NE"]):
if Ret == self.STRINGITEM or Ret == self.REALLOGICAL:
if Ret == self.STRINGITEM:
raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
Ret = self.Expr()
if Ret == self.STRINGITEM or Ret == self.REALLOGICAL:
if Ret == self.REALLOGICAL:
raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
Ret = self.REALLOGICAL
return Ret
@@ -250,7 +256,7 @@ class _LogicalExpressionParser(_ExprBase):
#
def Expr(self):
Ret = self.Factor()
while self.IsCurrentOp(["+", "-", "&", "|", "^"]):
while self.IsCurrentOp(["+", "-", "&", "|", "^", "XOR", "xor"]):
if self.Token[self.Index-1] == '|' and self.Parens <= 0:
raise _ExprError(ST.ERR_EXPR_OR)
if Ret == self.STRINGITEM or Ret == self.REALLOGICAL:
@@ -281,15 +287,15 @@ class _LogicalExpressionParser(_ExprBase):
return self.ARITH
else:
raise _ExprError(ST.ERR_EXPR_FACTOR % \
(self.Token, self.Token[self.Index:]))
(self.Token[self.Index:], self.Token))
## IsValidLogicalExpression
#
def IsValidLogicalExpression(self):
if self.Len == 0:
return False, ST.ERR_EXPR_EMPTY
return False, ST.ERR_EXPRESS_EMPTY
try:
if self.LogicalExpression() == self.ARITH:
if self.LogicalExpression() not in [self.ARITH, self.LOGICAL, self.REALLOGICAL, self.STRINGITEM]:
return False, ST.ERR_EXPR_LOGICAL % self.Token
except _ExprError, XExcept:
return False, XExcept.Error
@@ -307,55 +313,84 @@ class _ValidRangeExpressionParser(_ExprBase):
'[\t\s]*0[xX][a-fA-F0-9]+[\t\s]*-[\t\s]*0[xX][a-fA-F0-9]+'
def __init__(self, Token):
_ExprBase.__init__(self, Token)
self.Parens = 0
self.HEX = 1
self.INT = 2
self.IsParenHappen = False
self.IsLogicalOpHappen = False
## IsValidRangeExpression
#
def IsValidRangeExpression(self):
if self.Len == 0:
return False
return False, ST.ERR_EXPR_RANGE_EMPTY
try:
self.RangeExpression()
except _ExprError:
return False
if self.RangeExpression() not in [self.HEX, self.INT]:
return False, ST.ERR_EXPR_RANGE % self.Token
except _ExprError, XExcept:
return False, XExcept.Error
self.SkipWhitespace()
if self.Index != self.Len:
return False
return True
return False, (ST.ERR_EXPR_RANGE % self.Token)
return True, ''
## RangeExpression
#
def RangeExpression(self):
self.Unary()
while self.IsCurrentOp(['OR', 'AND', 'XOR']):
self.Unary()
Ret = self.Unary()
while self.IsCurrentOp(['OR', 'AND', 'and', 'or']):
self.IsLogicalOpHappen = True
if not self.IsParenHappen:
raise _ExprError(ST.ERR_PAREN_NOT_USED % self.Token)
self.IsParenHappen = False
Ret = self.Unary()
if self.IsCurrentOp(['XOR']):
Ret = self.Unary()
return Ret
## Unary
#
def Unary(self):
if self.IsCurrentOp(["NOT", "-"]):
if self.IsCurrentOp(["NOT"]):
return self.Unary()
return self.ValidRange()
## ValidRange
#
def ValidRange(self):
Ret = -1
if self.IsCurrentOp(["("]):
self.RangeExpression()
self.IsLogicalOpHappen = False
self.IsParenHappen = True
self.Parens += 1
if self.Parens > 1:
raise _ExprError(ST.ERR_EXPR_RANGE_DOUBLE_PAREN_NESTED % self.Token)
Ret = self.RangeExpression()
if not self.IsCurrentOp([")"]):
raise _ExprError('')
return
raise _ExprError(ST.ERR_EXPR_RIGHT_PAREN % self.Token)
self.Parens -= 1
return Ret
if self.IsCurrentOp(["LT", "GT", "LE", "GE", "EQ"]):
if self.IsLogicalOpHappen:
raise _ExprError(ST.ERR_PAREN_NOT_USED % self.Token)
if self.IsCurrentOp(["LT", "GT", "LE", "GE", "EQ", "XOR"]):
IntMatch = \
re.compile(self.INT_PATTERN).match(self.Token[self.Index:])
HexMatch = \
re.compile(self.HEX_PATTERN).match(self.Token[self.Index:])
if HexMatch and HexMatch.start() == 0:
self.Index += HexMatch.end()
Ret = self.HEX
elif IntMatch and IntMatch.start() == 0:
self.Index += IntMatch.end()
Ret = self.INT
else:
raise _ExprError('')
raise _ExprError(ST.ERR_EXPR_RANGE_FACTOR % (self.Token[self.Index:], self.Token))
else:
IntRangeMatch = re.compile(
self.INT_RANGE_PATTERN).match(self.Token[self.Index:]
@@ -365,15 +400,50 @@ class _ValidRangeExpressionParser(_ExprBase):
)
if HexRangeMatch and HexRangeMatch.start() == 0:
self.Index += HexRangeMatch.end()
Ret = self.HEX
elif IntRangeMatch and IntRangeMatch.start() == 0:
self.Index += IntRangeMatch.end()
Ret = self.INT
else:
raise _ExprError('')
if self.Token[self.Index:self.Index+1] == '_' or \
self.Token[self.Index:self.Index+1].isalnum():
raise _ExprError('')
raise _ExprError(ST.ERR_EXPR_RANGE % self.Token)
return Ret
## _ValidListExpressionParser
#
class _ValidListExpressionParser(_ExprBase):
VALID_LIST_PATTERN = '(0[xX][0-9a-fA-F]+|[0-9]+)([\t\s]*,[\t\s]*(0[xX][0-9a-fA-F]+|[0-9]+))*'
def __init__(self, Token):
_ExprBase.__init__(self, Token)
self.NUM = 1
def IsValidListExpression(self):
if self.Len == 0:
return False, ST.ERR_EXPR_LIST_EMPTY
try:
if self.ListExpression() not in [self.NUM]:
return False, ST.ERR_EXPR_LIST % self.Token
except _ExprError, XExcept:
return False, XExcept.Error
self.SkipWhitespace()
if self.Index != self.Len:
return False, (ST.ERR_EXPR_LIST % self.Token)
return True, ''
def ListExpression(self):
Ret = -1
self.SkipWhitespace()
ListMatch = re.compile(self.VALID_LIST_PATTERN).match(self.Token[self.Index:])
if ListMatch and ListMatch.start() == 0:
self.Index += ListMatch.end()
Ret = self.NUM
else:
raise _ExprError(ST.ERR_EXPR_LIST % self.Token)
return Ret
## _StringTestParser
#
class _StringTestParser(_ExprBase):
@@ -423,12 +493,26 @@ class _StringTestParser(_ExprBase):
self.StringItem()
if not self.IsCurrentOp(["==", "EQ", "!=", "NE"]):
raise _ExprError(ST.ERR_EXPR_EQUALITY % \
(self.Token, self.Token[self.Index:]))
(self.Token[self.Index:], self.Token))
self.StringItem()
if self.Index != self.Len:
raise _ExprError(ST.ERR_EXPR_BOOLEAN % \
(self.Token[self.Index:], self.Token))
##
# Check syntax of string test
#
# @param Token: string test token
#
def IsValidStringTest(Token, Flag=False):
#
# Not do the check right now, keep the implementation for future enhancement.
#
if not Flag:
return True, ""
return _StringTestParser(Token).IsValidStringTest()
##
# Check syntax of logical expression
#
@@ -442,19 +526,6 @@ def IsValidLogicalExpr(Token, Flag=False):
return True, ""
return _LogicalExpressionParser(Token).IsValidLogicalExpression()
##
# Check syntax of string test
#
# @param Token: string test token
#
def IsValidStringTest(Token, Flag=False):
#
# Not do the check right now, keep the implementation for future enhancement.
#
if not Flag:
return True, ""
return _StringTestParser(Token).IsValidStringTest()
##
# Check syntax of range expression
#
@@ -463,6 +534,14 @@ def IsValidStringTest(Token, Flag=False):
def IsValidRangeExpr(Token):
return _ValidRangeExpressionParser(Token).IsValidRangeExpression()
##
# Check syntax of value list expression token
#
# @param Token: value list expression token
#
def IsValidListExpr(Token):
return _ValidListExpressionParser(Token).IsValidListExpression()
##
# Check whether the feature flag expression is valid or not
#
@@ -486,4 +565,8 @@ def IsValidFeatureFlagExp(Token, Flag=False):
return True, ""
if __name__ == '__main__':
print _LogicalExpressionParser('a ^ b > a + b').IsValidLogicalExpression()
# print IsValidRangeExpr('LT 9')
print _LogicalExpressionParser('gCrownBayTokenSpaceGuid.PcdPciDevice1BridgeAddressLE0').IsValidLogicalExpression()
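A few hedged sanity checks for the reworked range validator and the new list validator (Python 2); results are stated as expected, not verified here.

# Sketch only, same module as above.
from Library.ExpressionValidate import IsValidListExpr, IsValidRangeExpr

print IsValidListExpr('0x1, 0x2, 10')                        # expected: (True, '')
print IsValidRangeExpr('0x0 - 0x1F')                         # expected: (True, '')
print IsValidRangeExpr('(0x0 - 0x1F) AND (0x30 - 0x3F)')     # expected: (True, '')
print IsValidRangeExpr('0x0 - 0x1F AND 0x30 - 0x3F')
# expected to fail: per the IsParenHappen check in RangeExpression, ranges joined
# by a logical operator must be parenthesized (ST.ERR_PAREN_NOT_USED).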

View File

@@ -1,7 +1,7 @@
## @file
# This file is used to define common static strings and global data used by UPT
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
@@ -92,6 +92,17 @@ gUNPACK_DIR = None
# Flag used to mark whether the INF file is Binary INF or not.
#
gIS_BINARY_INF = False
#
# Used by FileHook module.
#
gRECOVERMGR = None
#
# Used by PCD parser
#
gPackageDict = {}
#
# Used by Library instance parser
# {FilePath: FileObj}

View File

@@ -1,7 +1,7 @@
## @file
# Common routines used by all tools
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
@@ -41,11 +41,15 @@ from Library import GlobalData
from Library.DataType import SUP_MODULE_LIST
from Library.DataType import END_OF_LINE
from Library.DataType import TAB_SPLIT
from Library.DataType import LANGUAGE_EN_US
from Library.DataType import TAB_LANGUAGE_EN_US
from Library.DataType import TAB_LANGUAGE_EN
from Library.DataType import TAB_LANGUAGE_EN_X
from Library.DataType import TAB_UNI_FILE_SUFFIXS
from Library.String import GetSplitValueList
from Library.ParserValidate import IsValidHexVersion
from Library.ParserValidate import IsValidPath
from Object.POM.CommonObject import TextObject
from Core.FileHook import __FileHookOpen__
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C
# structure style
@@ -160,14 +164,14 @@ def SaveFileOnChange(File, Content, IsBinaryFile=True):
if os.path.exists(File):
try:
if Content == open(File, "rb").read():
if Content == __FileHookOpen__(File, "rb").read():
return False
except BaseException:
Logger.Error(None, ToolError.FILE_OPEN_FAILURE, ExtraData=File)
CreateDirectory(os.path.dirname(File))
try:
FileFd = open(File, "wb")
FileFd = __FileHookOpen__(File, "wb")
FileFd.write(Content)
FileFd.close()
except BaseException:
@@ -188,6 +192,8 @@ def GetFiles(Root, SkipList=None, FullPath=True):
for Item in SkipList:
if Item in Dirs:
Dirs.remove(Item)
if Item in Files:
Files.remove(Item)
for Dir in Dirs:
if Dir.startswith('.'):
Dirs.remove(Dir)
@@ -566,39 +572,46 @@ class PathClass(object):
Key = property(_GetFileKey)
## Check environment variables
## Get current workspace
#
# Check environment variables that must be set for build. Currently they are
# get WORKSPACE from environment variable if present,if not use current working directory as WORKSPACE
#
# WORKSPACE The directory all packages/platforms start from
# EDK_TOOLS_PATH The directory contains all tools needed by the build
# PATH $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
# If any of above environment variable is not set or has error, the build
# will be broken.
#
def CheckEnvVariable():
def GetWorkspace():
#
# check WORKSPACE
#
if "WORKSPACE" not in environ:
Logger.Error("UPT",
ToolError.UPT_ENVIRON_MISSING_ERROR,
ST.ERR_NOT_FOUND_ENVIRONMENT,
ExtraData="WORKSPACE")
if "WORKSPACE" in environ:
WorkspaceDir = os.path.normpath(environ["WORKSPACE"])
if not os.path.exists(WorkspaceDir):
Logger.Error("UPT",
ToolError.UPT_ENVIRON_MISSING_ERROR,
ST.ERR_WORKSPACE_NOTEXIST,
ExtraData="%s" % WorkspaceDir)
else:
WorkspaceDir = os.getcwd()
WorkspaceDir = os.path.normpath(environ["WORKSPACE"])
if not os.path.exists(WorkspaceDir):
Logger.Error("UPT",
ToolError.UPT_ENVIRON_MISSING_ERROR,
ST.ERR_WORKSPACE_NOTEXIST,
ExtraData="%s" % WorkspaceDir)
elif ' ' in WorkspaceDir:
Logger.Error("UPT",
ToolError.FORMAT_NOT_SUPPORTED,
ST.ERR_SPACE_NOTALLOWED,
ExtraData=WorkspaceDir)
if WorkspaceDir[-1] == ':':
WorkspaceDir += os.sep
return WorkspaceDir
## Get relative path
#
# use full path and workspace to get relative path
# the destination of this function is mainly to resolve the root path issue(like c: or c:\)
#
# @param Fullpath: a string of fullpath
# @param Workspace: a string of workspace
#
def GetRelativePath(Fullpath, Workspace):
RelativePath = ''
if Workspace.endswith(os.sep):
RelativePath = Fullpath[Fullpath.upper().find(Workspace.upper())+len(Workspace):]
else:
RelativePath = Fullpath[Fullpath.upper().find(Workspace.upper())+len(Workspace)+1:]
return RelativePath
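A tiny sketch of GetRelativePath (Python 2); the paths are made up and a POSIX os.sep ('/') is assumed.

# Sketch only: assumes os.sep == '/'.
from Library.Misc import GetRelativePath

print GetRelativePath('/work/edk2/MdePkg/MdePkg.dec', '/work/edk2')
# expected: 'MdePkg/MdePkg.dec'
print GetRelativePath('/work/edk2/MdePkg/MdePkg.dec', '/WORK/EDK2/')
# expected: 'MdePkg/MdePkg.dec'  (the workspace lookup is case-insensitive)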
## Check whether all module types are in list
#
# check whether all module types (SUP_MODULE_LIST) are in list
@@ -644,7 +657,7 @@ class MergeCommentDict(dict):
#
def GenDummyHelpTextObj():
HelpTxt = TextObject()
HelpTxt.SetLang(LANGUAGE_EN_US)
HelpTxt.SetLang(TAB_LANGUAGE_EN_US)
HelpTxt.SetString(' ')
return HelpTxt
@@ -972,3 +985,136 @@ def GetLibInstanceInfo(String, WorkSpace, LineNo):
VerString = GetSplitValueList(VerString, '=', 1)[1]
return FileGuidString, VerString
## GetLocalValue
#
# Generate the local value for INF and DEC file. If Lang attribute not present, then use this value.
# If present, and there is no element without the Lang attribute, and one of the elements has the rfc1766 code is
# "en-x-tianocore", or "en-US" if "en-x-tianocore" was not found, or "en" if "en-US" was not found, or startswith 'en'
# if 'en' was not found, then use this value.
# If multiple entries of a tag exist which have the same language code, use the last entry.
#
# @param ValueList A list need to be processed.
# @param UseFirstValue: True to use the first value, False to use the last value
#
# @return LocalValue
def GetLocalValue(ValueList, UseFirstValue=False):
Value1 = ''
Value2 = ''
Value3 = ''
Value4 = ''
Value5 = ''
for (Key, Value) in ValueList:
if Key == TAB_LANGUAGE_EN_X:
if UseFirstValue:
if not Value1:
Value1 = Value
else:
Value1 = Value
if Key == TAB_LANGUAGE_EN_US:
if UseFirstValue:
if not Value2:
Value2 = Value
else:
Value2 = Value
if Key == TAB_LANGUAGE_EN:
if UseFirstValue:
if not Value3:
Value3 = Value
else:
Value3 = Value
if Key.startswith(TAB_LANGUAGE_EN):
if UseFirstValue:
if not Value4:
Value4 = Value
else:
Value4 = Value
if Key == '':
if UseFirstValue:
if not Value5:
Value5 = Value
else:
Value5 = Value
if Value1:
return Value1
if Value2:
return Value2
if Value3:
return Value3
if Value4:
return Value4
if Value5:
return Value5
return ''
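A short sketch (Python 2) of the selection order implemented by GetLocalValue; the import path is assumed and the strings are made up.

# Sketch only: import path assumed.
from Library.Misc import GetLocalValue

ValueList = [
    ('en-US',          u'US English text'),
    ('en-x-tianocore', u'TianoCore text'),
    ('',               u'Untagged text'),
]
print GetLocalValue(ValueList)   # expected: u'TianoCore text' ('en-x-tianocore' wins)
print GetLocalValue([('fr-FR', u'French text'), ('', u'Untagged text')])
# expected: u'Untagged text' (no en-* entry, so the untagged value is used)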
## GetCharIndexOutStr
#
# Get comment character index outside a string
#
# @param Line: The string to be checked
# @param CommentCharacter: Comment char, used to ignore comment content
#
# @retval Index
#
def GetCharIndexOutStr(CommentCharacter, Line):
#
# remove whitespace
#
Line = Line.strip()
#
# Check whether comment character is in a string
#
InString = False
for Index in range(0, len(Line)):
if Line[Index] == '"':
InString = not InString
elif Line[Index] == CommentCharacter and InString :
pass
elif Line[Index] == CommentCharacter and (Index +1) < len(Line) and Line[Index+1] == CommentCharacter \
and not InString :
return Index
return -1
## ValidateUNIFilePath
#
# Check the UNI file path
#
# @param FilePath: The UNI file path
#
def ValidateUNIFilePath(Path):
Suffix = Path[Path.rfind(TAB_SPLIT):]
#
# Check if the suffix is one of the '.uni', '.UNI', '.Uni'
#
if Suffix not in TAB_UNI_FILE_SUFFIXS:
Logger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
Message=ST.ERR_UNI_FILE_SUFFIX_WRONG,
ExtraData=Path)
#
# Check if '..' in the file name(without suffixe)
#
if (TAB_SPLIT + TAB_SPLIT) in Path:
Logger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
Message=ST.ERR_UNI_FILE_NAME_INVALID,
ExtraData=Path)
#
# Check if the file name is valid according to the DEC and INF specification
#
Pattern = '[a-zA-Z0-9_][a-zA-Z0-9_\-\.]*'
FileName = Path.replace(Suffix, '')
InvalidCh = re.sub(Pattern, '', FileName)
if InvalidCh:
Logger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
Message=ST.ERR_INF_PARSER_FILE_NOT_EXIST_OR_NAME_INVALID,
ExtraData=Path)
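A hedged sketch of the new UNI file-name check (Python 2); invalid names do not return, they are reported through Logger.Error.

# Sketch only: import path assumed.
from Library.Misc import ValidateUNIFilePath

ValidateUNIFilePath('ExamplePkgExtra.uni')    # passes: allowed suffix, clean name
# ValidateUNIFilePath('ExampleStrings.txt')   # rejected: suffix must be .uni/.UNI/.Uni
# ValidateUNIFilePath('Example..Pkg.uni')     # rejected: '..' is not allowed in the name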

View File

@@ -1,6 +1,7 @@
## @file ParserValidate.py
# Functions for parser validation
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
@@ -566,7 +567,7 @@ def IsValidPcdValue(PcdValue):
for Char in PcdValue:
if Char == '\n' or Char == '\t' or Char == '\f':
return False
#
# <Boolean>
#
@@ -582,7 +583,7 @@ def IsValidPcdValue(PcdValue):
if IsValidHex(PcdValue):
return True
ReIsValidIntegerSingle = re.compile(r"^\s*[0-9]\s*$", re.DOTALL)
ReIsValidIntegerSingle = re.compile(r"^\s*[0-9]\s*$", re.DOTALL)
if ReIsValidIntegerSingle.match(PcdValue) != None:
return True
@@ -590,7 +591,6 @@ def IsValidPcdValue(PcdValue):
if ReIsValidIntegerMulti.match(PcdValue) != None:
return True
#
# <StringVal> ::= {<StringType>} {<Array>} {"$(" <MACRO> ")"}
# <StringType> ::= {<UnicodeString>} {<CString>}

View File

@@ -2,7 +2,7 @@
# This file is used to define common parsing related functions used in parsing
# INF/DEC/DSC process
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
@@ -42,6 +42,7 @@ from Logger import StringTable as ST
import Logger.Log as Logger
from Parser.DecParser import Dec
import GlobalData
gPKG_INFO_DICT = {}
@@ -53,7 +54,7 @@ gPKG_INFO_DICT = {}
# @param String: String with BuildOption statement
# @param File: The file which defines build option, used in error report
#
def GetBuildOption(String, File, LineNo=-1):
def GetBuildOption(String, File, LineNo= -1):
(Family, ToolChain, Flag) = ('', '', '')
if String.find(DataType.TAB_EQUAL_SPLIT) < 0:
RaiseParserError(String, 'BuildOptions', File, \
@@ -76,7 +77,7 @@ def GetBuildOption(String, File, LineNo=-1):
# @param ContainerFile: The file which describes the library class, used for
# error report
#
def GetLibraryClass(Item, ContainerFile, WorkspaceDir, LineNo=-1):
def GetLibraryClass(Item, ContainerFile, WorkspaceDir, LineNo= -1):
List = GetSplitValueList(Item[0])
SupMod = DataType.SUP_MODULE_LIST_STRING
if len(List) != 2:
@@ -101,7 +102,7 @@ def GetLibraryClass(Item, ContainerFile, WorkspaceDir, LineNo=-1):
# @param ContainerFile: The file which describes the library class, used for
# error report
#
def GetLibraryClassOfInf(Item, ContainerFile, WorkspaceDir, LineNo = -1):
def GetLibraryClassOfInf(Item, ContainerFile, WorkspaceDir, LineNo= -1):
ItemList = GetSplitValueList((Item[0] + DataType.TAB_VALUE_SPLIT * 2))
SupMod = DataType.SUP_MODULE_LIST_STRING
@@ -131,7 +132,7 @@ def GetLibraryClassOfInf(Item, ContainerFile, WorkspaceDir, LineNo = -1):
# @param Section: Used for error report
# @param File: Used for error report
#
def CheckPcdTokenInfo(TokenInfoString, Section, File, LineNo=-1):
def CheckPcdTokenInfo(TokenInfoString, Section, File, LineNo= -1):
Format = '<TokenSpaceGuidCName>.<PcdCName>'
if TokenInfoString != '' and TokenInfoString != None:
TokenInfoList = GetSplitValueList(TokenInfoString, DataType.TAB_SPLIT)
@@ -151,7 +152,7 @@ def CheckPcdTokenInfo(TokenInfoString, Section, File, LineNo=-1):
# report
#
def GetPcd(Item, Type, ContainerFile, LineNo=-1):
def GetPcd(Item, Type, ContainerFile, LineNo= -1):
TokenGuid, TokenName, Value, MaximumDatumSize, Token = '', '', '', '', ''
List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
@@ -178,7 +179,7 @@ def GetPcd(Item, Type, ContainerFile, LineNo=-1):
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetFeatureFlagPcd(Item, Type, ContainerFile, LineNo=-1):
def GetFeatureFlagPcd(Item, Type, ContainerFile, LineNo= -1):
TokenGuid, TokenName, Value = '', '', ''
List = GetSplitValueList(Item)
if len(List) != 2:
@@ -202,7 +203,7 @@ def GetFeatureFlagPcd(Item, Type, ContainerFile, LineNo=-1):
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetDynamicDefaultPcd(Item, Type, ContainerFile, LineNo=-1):
def GetDynamicDefaultPcd(Item, Type, ContainerFile, LineNo= -1):
TokenGuid, TokenName, Value, DatumTyp, MaxDatumSize = '', '', '', '', ''
List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
if len(List) < 4 or len(List) > 8:
@@ -228,7 +229,7 @@ def GetDynamicDefaultPcd(Item, Type, ContainerFile, LineNo=-1):
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetDynamicHiiPcd(Item, Type, ContainerFile, LineNo = -1):
def GetDynamicHiiPcd(Item, Type, ContainerFile, LineNo= -1):
TokenGuid, TokenName, List1, List2, List3, List4, List5 = \
'', '', '', '', '', '', ''
List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
@@ -255,7 +256,7 @@ def GetDynamicHiiPcd(Item, Type, ContainerFile, LineNo = -1):
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetDynamicVpdPcd(Item, Type, ContainerFile, LineNo=-1):
def GetDynamicVpdPcd(Item, Type, ContainerFile, LineNo= -1):
TokenGuid, TokenName, List1, List2 = '', '', '', ''
List = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT)
if len(List) < 3 or len(List) > 4:
@@ -533,7 +534,7 @@ def GetComponents(Lines, KeyValues, CommentCharacter):
# @param ContainerFile: The file which describes the library class, used
# for error report
#
def GetSource(Item, ContainerFile, FileRelativePath, LineNo=-1):
def GetSource(Item, ContainerFile, FileRelativePath, LineNo= -1):
ItemNew = Item + DataType.TAB_VALUE_SPLIT * 4
List = GetSplitValueList(ItemNew)
if len(List) < 5 or len(List) > 9:
@@ -558,7 +559,7 @@ def GetSource(Item, ContainerFile, FileRelativePath, LineNo=-1):
# @param ContainerFile: The file which describes the library class,
# used for error report
#
def GetBinary(Item, ContainerFile, LineNo=-1):
def GetBinary(Item, ContainerFile, LineNo= -1):
ItemNew = Item + DataType.TAB_VALUE_SPLIT
List = GetSplitValueList(ItemNew)
if len(List) < 3 or len(List) > 5:
@@ -596,7 +597,7 @@ def GetGuidsProtocolsPpisOfInf(Item):
# @param ContainerFile: The file which describes the library class,
# used for error report
#
def GetGuidsProtocolsPpisOfDec(Item, Type, ContainerFile, LineNo=-1):
def GetGuidsProtocolsPpisOfDec(Item, Type, ContainerFile, LineNo= -1):
List = GetSplitValueList(Item, DataType.TAB_EQUAL_SPLIT)
if len(List) != 2:
RaiseParserError(Item, Type, ContainerFile, '<CName>=<GuidValue>', \
@@ -614,7 +615,7 @@ def GetGuidsProtocolsPpisOfDec(Item, Type, ContainerFile, LineNo=-1):
RegisterFormatGuid = List[1]
else:
RaiseParserError(Item, Type, ContainerFile, \
'CFormat or RegisterFormat', LineNo)
'CFormat or RegisterFormat', LineNo)
return (List[0], RegisterFormatGuid)
@@ -627,7 +628,7 @@ def GetGuidsProtocolsPpisOfDec(Item, Type, ContainerFile, LineNo=-1):
# @param ContainerFile: The file which describes the library class,
# used for error report
#
def GetPackage(Item, ContainerFile, FileRelativePath, LineNo=-1):
def GetPackage(Item, ContainerFile, FileRelativePath, LineNo= -1):
ItemNew = Item + DataType.TAB_VALUE_SPLIT
List = GetSplitValueList(ItemNew)
CheckFileType(List[0], '.Dec', ContainerFile, 'package', List[0], LineNo)
@@ -683,7 +684,7 @@ def GetPcdOfInf(Item, Type, File, LineNo):
# @param File: Dec file
# @param LineNo: Line number
#
def GetPcdOfDec(Item, Type, File, LineNo=-1):
def GetPcdOfDec(Item, Type, File, LineNo= -1):
Format = '<TokenSpaceGuidCName>.<PcdCName>|<Value>|<DatumType>|<Token>'
TokenGuid, TokenName, Value, DatumType, Token = '', '', '', '', ''
List = GetSplitValueList(Item)
@@ -756,7 +757,7 @@ def InsertSectionItems(Model, SectionItemList, ArchList, \
LineValue, StartLine, Comment = SectionItem[0], \
SectionItem[1], SectionItem[2]
Logger.Debug(4, ST.MSG_PARSING %LineValue)
Logger.Debug(4, ST.MSG_PARSING % LineValue)
#
# And then parse DEFINE statement
#
@@ -782,7 +783,7 @@ def GenMetaDatSectionItem(Key, Value, List):
List[Key] = [Value]
else:
List[Key].append(Value)
## GetPkgInfoFromDec
#
# get package name, guid, version info from dec files
@@ -793,17 +794,23 @@ def GetPkgInfoFromDec(Path):
PkgName = None
PkgGuid = None
PkgVersion = None
Path = Path.replace('\\', '/')
if not os.path.exists(Path):
Logger.Error("\nUPT", FILE_NOT_FOUND, File = Path)
Logger.Error("\nUPT", FILE_NOT_FOUND, File=Path)
if Path in gPKG_INFO_DICT:
return gPKG_INFO_DICT[Path]
try:
DecParser = Dec(Path)
DecParser = None
if Path not in GlobalData.gPackageDict:
DecParser = Dec(Path)
GlobalData.gPackageDict[Path] = DecParser
else:
DecParser = GlobalData.gPackageDict[Path]
PkgName = DecParser.GetPackageName()
PkgGuid = DecParser.GetPackageGuid()
PkgVersion = DecParser.GetPackageVersion()
@@ -819,7 +826,7 @@ def GetPkgInfoFromDec(Path):
#
def GetWorkspacePackage():
DecFileList = []
WorkspaceDir = os.environ["WORKSPACE"]
WorkspaceDir = GlobalData.gWORKSPACE
for Root, Dirs, Files in os.walk(WorkspaceDir):
if 'CVS' in Dirs:
Dirs.remove('CVS')
@@ -843,7 +850,7 @@ def GetWorkspacePackage():
(PkgName, PkgGuid, PkgVersion) = GetPkgInfoFromDec(DecFile)
if PkgName and PkgGuid and PkgVersion:
PkgList.append((PkgName, PkgGuid, PkgVersion, DecFile))
return PkgList
## GetWorkspaceModule
@@ -852,7 +859,7 @@ def GetWorkspacePackage():
#
def GetWorkspaceModule():
InfFileList = []
WorkspaceDir = os.environ["WORKSPACE"]
WorkspaceDir = GlobalData.gWORKSPACE
for Root, Dirs, Files in os.walk(WorkspaceDir):
if 'CVS' in Dirs:
Dirs.remove('CVS')
@@ -870,7 +877,7 @@ def GetWorkspaceModule():
if Ext.lower() in ['.inf']:
InfFileList.append\
(os.path.normpath(os.path.join(Root, FileSp)))
return InfFileList
## MacroParser used to parse macro definition
@@ -890,7 +897,7 @@ def MacroParser(Line, FileName, SectionType, FileLocalMacros):
# Not 'DEFINE/EDK_GLOBAL' statement, call decorated method
#
return None, None
TokenList = GetSplitValueList(LineContent[Match.end(1):], \
DataType.TAB_EQUAL_SPLIT, 1)
#
@@ -910,16 +917,16 @@ def MacroParser(Line, FileName, SectionType, FileLocalMacros):
#
if SectionType == DataType.MODEL_META_DATA_HEADER:
FileLocalMacros[Name] = Value
ReIsValidMacroName = re.compile(r"^[A-Z][A-Z0-9_]*$", re.DOTALL)
if ReIsValidMacroName.match(Name) == None:
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_MACRONAME_INVALID%(Name),
ExtraData=LineContent,
File=FileName,
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_MACRONAME_INVALID % (Name),
ExtraData=LineContent,
File=FileName,
Line=LineNo)
# Validate MACRO Value
#
# <MacroDefinition> ::= [<Comments>]{0,}
@@ -932,13 +939,13 @@ def MacroParser(Line, FileName, SectionType, FileLocalMacros):
#
ReIsValidMacroValue = re.compile(r"^[\x20-\x7e]*$", re.DOTALL)
if ReIsValidMacroValue.match(Value) == None:
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_MACROVALUE_INVALID%(Value),
ExtraData=LineContent,
File=FileName,
Logger.Error('Parser',
FORMAT_INVALID,
ST.ERR_MACROVALUE_INVALID % (Value),
ExtraData=LineContent,
File=FileName,
Line=LineNo)
return Name, Value
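MacroParser accepts a DEFINE only when the macro name matches ^[A-Z][A-Z0-9_]*$ and the value contains printable ASCII, as the two regular expressions above show. A small standalone check in the same spirit; the real function reports failures through Logger.Error instead of returning a reason string:

import re

_VALID_MACRO_NAME = re.compile(r"^[A-Z][A-Z0-9_]*$")
_VALID_MACRO_VALUE = re.compile(r"^[\x20-\x7e]*$")

def check_macro(name, value):
    # Return (True, '') when both parts are acceptable, otherwise (False, reason).
    if _VALID_MACRO_NAME.match(name) is None:
        return False, "invalid macro name: %s" % name
    if _VALID_MACRO_VALUE.match(value) is None:
        return False, "invalid macro value: %s" % value
    return True, ''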
## GenSection
@@ -952,7 +959,7 @@ def MacroParser(Line, FileName, SectionType, FileLocalMacros):
# separated by space,
# value is statement
#
def GenSection(SectionName, SectionDict, SplitArch=True):
def GenSection(SectionName, SectionDict, SplitArch=True, NeedBlankLine=False):
Content = ''
for SectionAttrs in SectionDict:
StatementList = SectionDict[SectionAttrs]
@@ -969,11 +976,29 @@ def GenSection(SectionName, SectionDict, SplitArch=True):
Section = '[' + SectionName + '.' + (', ' + SectionName + '.').join(ArchList) + ']'
else:
Section = '[' + SectionName + ']'
Content += '\n\n' + Section + '\n'
Content += '\n' + Section + '\n'
if StatementList != None:
for Statement in StatementList:
Content += Statement + '\n'
LineList = Statement.split('\n')
NewStatement = ""
for Line in LineList:
# ignore blank comment
if not Line.replace("#", '').strip() and SectionName != 'Defines':
continue
# add two spaces before non-comment lines, except for comments in the Defines section
if Line.strip().startswith('#') and SectionName == 'Defines':
NewStatement += "%s\n" % Line
continue
NewStatement += " %s\n" % Line
if NeedBlankLine:
Content += NewStatement + '\n'
else:
Content += NewStatement
if NeedBlankLine:
Content = Content[:-1]
if not Content.replace('\\n', '').strip():
return ''
return Content
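The reworked GenSection drops blank comment lines, indents ordinary statements by two spaces, and keeps '#' comments flush left only inside the Defines section, optionally separating statements with blank lines. A simplified sketch of just that per-statement formatting rule, not the full section generator:

def format_statement(statement, section_name):
    # Re-indent one multi-line statement the way the generated INF/DEC text
    # is laid out: two leading spaces for content, comments left untouched
    # only inside [Defines], blank comment lines dropped elsewhere.
    out = []
    for line in statement.split('\n'):
        if not line.replace('#', '').strip() and section_name != 'Defines':
            continue
        if line.strip().startswith('#') and section_name == 'Defines':
            out.append(line)
        else:
            out.append('  ' + line)
    return '\n'.join(out)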
## ConvertArchForInstall
@@ -984,10 +1009,10 @@ def GenSection(SectionName, SectionDict, SplitArch=True):
# @return: the arch string that get converted
#
def ConvertArchForInstall(Arch):
if Arch.upper() in [DataType.TAB_ARCH_IA32, DataType.TAB_ARCH_X64,
if Arch.upper() in [DataType.TAB_ARCH_IA32, DataType.TAB_ARCH_X64,
DataType.TAB_ARCH_IPF, DataType.TAB_ARCH_EBC]:
Arch = Arch.upper()
elif Arch.upper() == DataType.TAB_ARCH_COMMON:
Arch = Arch.lower()
return Arch


@@ -1,12 +1,12 @@
## @file
# This file is used to define common string related functions used in parsing
# This file is used to define common string related functions used in parsing
# process
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
@@ -43,7 +43,7 @@ gMACRO_PATTERN = re.compile("\$\(([_A-Z][_A-Z0-9]*)\)", re.UNICODE)
# @param MaxSplit: The max number of split values, default is -1
#
#
def GetSplitValueList(String, SplitTag=DataType.TAB_VALUE_SPLIT, MaxSplit=-1):
def GetSplitValueList(String, SplitTag=DataType.TAB_VALUE_SPLIT, MaxSplit= -1):
return map(lambda l: l.strip(), String.split(SplitTag, MaxSplit))
## MergeArches
@@ -129,7 +129,7 @@ def GetDynamics(Lines, Key, KeyValues, CommentCharacter):
## SplitModuleType
#
# Split ModuleType out of section define to get key
# [LibraryClass.Arch.ModuleType|ModuleType|ModuleType] -> [
# [LibraryClass.Arch.ModuleType|ModuleType|ModuleType] -> [
# 'LibraryClass.Arch', ['ModuleType', 'ModuleType', 'ModuleType'] ]
#
# @param Key: String to be parsed
@@ -164,7 +164,7 @@ def SplitModuleType(Key):
# @param Line: The content contain line string and line number
# @param FileName: The meta-file file name
#
def ReplaceMacro(String, MacroDefinitions = None, SelfReplacement = False, Line = None, FileName = None, Flag = False):
def ReplaceMacro(String, MacroDefinitions=None, SelfReplacement=False, Line=None, FileName=None, Flag=False):
LastString = String
if MacroDefinitions == None:
MacroDefinitions = {}
@@ -184,10 +184,10 @@ def ReplaceMacro(String, MacroDefinitions = None, SelfReplacement = False, Line
Count += 1
if Count % 2 != 0:
MacroString += QuotedStringItem
if Count == len(QuotedStringList) and Count%2 == 0:
if Count == len(QuotedStringList) and Count % 2 == 0:
MacroString += QuotedStringItem
MacroUsed = gMACRO_PATTERN.findall(MacroString)
#
# no macro found in String, stop replacing
@@ -198,7 +198,7 @@ def ReplaceMacro(String, MacroDefinitions = None, SelfReplacement = False, Line
if Macro not in MacroDefinitions:
if SelfReplacement:
String = String.replace("$(%s)" % Macro, '')
Logger.Debug(5, "Delete undefined MACROs in file %s line %d: %s!" %(FileName, Line[1], Line[0]))
Logger.Debug(5, "Delete undefined MACROs in file %s line %d: %s!" % (FileName, Line[1], Line[0]))
continue
if not HaveQuotedMacroFlag:
String = String.replace("$(%s)" % Macro, MacroDefinitions[Macro])
@@ -207,25 +207,25 @@ def ReplaceMacro(String, MacroDefinitions = None, SelfReplacement = False, Line
for QuotedStringItem in QuotedStringList:
Count += 1
if Count % 2 != 0:
QuotedStringList[Count-1] = QuotedStringList[Count-1].replace("$(%s)" % Macro,
QuotedStringList[Count - 1] = QuotedStringList[Count - 1].replace("$(%s)" % Macro,
MacroDefinitions[Macro])
elif Count == len(QuotedStringList) and Count%2 == 0:
QuotedStringList[Count-1] = QuotedStringList[Count-1].replace("$(%s)" % Macro,
elif Count == len(QuotedStringList) and Count % 2 == 0:
QuotedStringList[Count - 1] = QuotedStringList[Count - 1].replace("$(%s)" % Macro,
MacroDefinitions[Macro])
RetString = ''
if HaveQuotedMacroFlag:
Count = 0
for QuotedStringItem in QuotedStringList:
Count += 1
Count += 1
if Count != len(QuotedStringList):
RetString += QuotedStringList[Count-1] + "\""
RetString += QuotedStringList[Count - 1] + "\""
else:
RetString += QuotedStringList[Count-1]
RetString += QuotedStringList[Count - 1]
String = RetString
#
#
# in case there's macro not defined
#
if String == LastString:
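The ReplaceMacro hunks above preserve text inside double quotes while expanding $(MACRO) references outside them. A rough illustration of that quote-aware idea; the UPT function additionally handles self-replacement of undefined macros and debug logging, so this is not its exact behaviour:

import re

_MACRO = re.compile(r"\$\(([_A-Z][_A-Z0-9]*)\)")

def replace_unquoted_macros(text, macros):
    # Split on double quotes: even-indexed chunks sit outside quotes and get
    # macro expansion, odd-indexed chunks are quoted and left untouched.
    parts = text.split('"')
    for index in range(0, len(parts), 2):
        parts[index] = _MACRO.sub(
            lambda match: macros.get(match.group(1), match.group(0)),
            parts[index])
    return '"'.join(parts)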
@@ -242,7 +242,7 @@ def ReplaceMacro(String, MacroDefinitions = None, SelfReplacement = False, Line
# @param Path: The input value for Path to be converted
# @param Defines: A set for DEFINE statement
#
def NormPath(Path, Defines = None):
def NormPath(Path, Defines=None):
IsRelativePath = False
if Defines == None:
Defines = {}
@@ -269,7 +269,7 @@ def NormPath(Path, Defines = None):
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
# @param CommentCharacter: Comment char, used to ignore comment content,
# default is DataType.TAB_COMMENT_SPLIT
#
def CleanString(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
@@ -305,7 +305,7 @@ def CleanString(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyle
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
# @param CommentCharacter: Comment char, used to ignore comment content,
# default is DataType.TAB_COMMENT_SPLIT
#
def CleanString2(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
@@ -394,10 +394,10 @@ def GetDefineValue(String, Key, CommentCharacter):
# @param CommentCharacter: Comment char, be used to ignore comment content
# @param KeySplitCharacter: Key split char, between key name and key value.
# Key1 = Value1, '=' is the key split char
# @param ValueSplitFlag: Value split flag, be used to decide if has
# @param ValueSplitFlag: Value split flag, be used to decide if has
# multiple values
# @param ValueSplitCharacter: Value split char, be used to split multiple
# values. Key1 = Value1|Value2, '|' is the value
# @param ValueSplitCharacter: Value split char, be used to split multiple
# values. Key1 = Value1|Value2, '|' is the value
# split char
#
def GetSingleValueOfKeyFromLines(Lines, Dictionary, CommentCharacter, KeySplitCharacter, \
@@ -489,7 +489,7 @@ def PreCheck(FileName, FileContent, SupSectionTag):
#
if Line.find('$') > -1:
if Line.find('$(') < 0 or Line.find(')') < 0:
Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=Logger.IS_RAISE_ERROR)
#
# Check []
#
@@ -498,14 +498,14 @@ def PreCheck(FileName, FileContent, SupSectionTag):
# Only get one '[' or one ']'
#
if not (Line.find('[') > -1 and Line.find(']') > -1):
Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=Logger.IS_RAISE_ERROR)
#
# Regenerate FileContent
#
NewFileContent = NewFileContent + Line + '\r\n'
if IsFailed:
Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError = Logger.IS_RAISE_ERROR)
Logger.Error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=Logger.IS_RAISE_ERROR)
return NewFileContent
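PreCheck, shown above, rejects a line when a '$' is not part of a complete '$(...)' reference or when a section header carries only one of '[' and ']'. A simplified boolean version of those two checks, with the Logger-based error reporting left out:

def precheck_line(line):
    # Every '$' must belong to a '$(...)' macro reference.
    if '$' in line and ('$(' not in line or ')' not in line):
        return False
    # A line that opens a section bracket must also close it, and vice versa.
    if ('[' in line) != (']' in line):
        return False
    return True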
@@ -523,7 +523,7 @@ def PreCheck(FileName, FileContent, SupSectionTag):
# @param Line: The line in container file which defines the file
# to be checked
#
def CheckFileType(CheckFilename, ExtName, ContainerFilename, SectionName, Line, LineNo=-1):
def CheckFileType(CheckFilename, ExtName, ContainerFilename, SectionName, Line, LineNo= -1):
if CheckFilename != '' and CheckFilename != None:
(Root, Ext) = os.path.splitext(CheckFilename)
if Ext.upper() != ExtName.upper() and Root:
@@ -544,13 +544,13 @@ def CheckFileType(CheckFilename, ExtName, ContainerFilename, SectionName, Line,
#
# @param CheckFilename: Name of the file to be checked
# @param WorkspaceDir: Current workspace dir
# @param ContainerFilename: The container file which describes the file to
# @param ContainerFilename: The container file which describes the file to
# be checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the
# @param Line: The line in container file which defines the
# file to be checked
#
def CheckFileExist(WorkspaceDir, CheckFilename, ContainerFilename, SectionName, Line, LineNo=-1):
def CheckFileExist(WorkspaceDir, CheckFilename, ContainerFilename, SectionName, Line, LineNo= -1):
CheckFile = ''
if CheckFilename != '' and CheckFilename != None:
CheckFile = WorkspaceFile(WorkspaceDir, CheckFilename)
@@ -560,7 +560,7 @@ def CheckFileExist(WorkspaceDir, CheckFilename, ContainerFilename, SectionName,
LineNo = GetLineNo(ContainerFile, Line)
ErrorMsg = ST.ERR_CHECKFILE_NOTFOUND % (CheckFile, SectionName)
Logger.Error("Parser", PARSER_ERROR, ErrorMsg,
File=ContainerFilename, Line = LineNo, RaiseError=Logger.IS_RAISE_ERROR)
File=ContainerFilename, Line=LineNo, RaiseError=Logger.IS_RAISE_ERROR)
return CheckFile
## GetLineNo
@@ -593,7 +593,7 @@ def GetLineNo(FileContent, Line, IsIgnoreComment=True):
# @param File: File which has the string
# @param Format: Correct format
#
def RaiseParserError(Line, Section, File, Format='', LineNo=-1):
def RaiseParserError(Line, Section, File, Format='', LineNo= -1):
if LineNo == -1:
LineNo = GetLineNo(open(os.path.normpath(File), 'r').read(), Line)
ErrorMsg = ST.ERR_INVALID_NOTFOUND % (Line, Section)
@@ -650,7 +650,7 @@ def ConvertToSqlString2(String):
# @param Lines: string list
# @param Split: split character
#
def GetStringOfList(List, Split = ' '):
def GetStringOfList(List, Split=' '):
if type(List) != type([]):
return List
Str = ''
@@ -696,7 +696,7 @@ def StringArrayLength(String):
# @param OptionString: the option string
# @param Which: Which flag
# @param Against: Against flag
#
#
def RemoveDupOption(OptionString, Which="/I", Against=None):
OptionList = OptionString.split()
ValueList = []
@@ -718,14 +718,14 @@ def RemoveDupOption(OptionString, Which="/I", Against=None):
## Check if the string is HexDigit
#
# Return true if all characters in the string are digits and there is at
# least one character
# Return true if all characters in the string are digits and there is at
# least one character
# or valid Hexs (started with 0x, following by hexdigit letters)
# , false otherwise.
# , false otherwise.
# @param string: input string
#
def IsHexDigit(Str):
try:
try:
int(Str, 10)
return True
except ValueError:
@@ -737,16 +737,16 @@ def IsHexDigit(Str):
return False
return False
## Check if the string is HexDgit and its integer value within limit of UINT32
## Check if the string is HexDgit and its interger value within limit of UINT32
#
# Return true if all characters in the string are digits and there is at
# least one character
# Return true if all characters in the string are digits and there is at
# least one character
# or valid Hexs (started with 0x, following by hexdigit letters)
# , false otherwise.
# , false otherwise.
# @param string: input string
#
def IsHexDigitUINT32(Str):
try:
try:
Value = int(Str, 10)
if (Value <= 0xFFFFFFFF) and (Value >= 0):
return True
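IsHexDigit and IsHexDigitUINT32 accept either a decimal integer or a 0x-prefixed hexadecimal value, the latter additionally range-checked against UINT32. A compact equivalent of that validation, written as a single illustrative helper rather than the two UPT functions:

def is_uint32_literal(text):
    # Try decimal first, then 0x-prefixed hexadecimal, then range-check.
    try:
        value = int(text, 10)
    except ValueError:
        if not text.lower().startswith('0x'):
            return False
        try:
            value = int(text, 16)
        except ValueError:
            return False
    return 0 <= value <= 0xFFFFFFFF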
@@ -761,11 +761,11 @@ def IsHexDigitUINT32(Str):
return False
## CleanSpecialChar
#
# The ASCII text files of type INF, DEC, INI are edited by developers,
#
# The ASCII text files of type INF, DEC, INI are edited by developers,
# and may contain characters that cannot be directly translated to strings that
# are conformant with the UDP XML Schema. Any characters in this category
# (0x00-0x08, TAB [0x09], 0x0B, 0x0C, 0x0E-0x1F, 0x80-0xFF)
# are conformant with the UDP XML Schema. Any characters in this category
# (0x00-0x08, TAB [0x09], 0x0B, 0x0C, 0x0E-0x1F, 0x80-0xFF)
# must be converted to a space character[0x20] as part of the parsing process.
#
def ConvertSpecialChar(Lines):
@@ -773,7 +773,7 @@ def ConvertSpecialChar(Lines):
for line in Lines:
ReMatchSpecialChar = re.compile(r"[\x00-\x08]|\x09|\x0b|\x0c|[\x0e-\x1f]|[\x7f-\xff]")
RetLines.append(ReMatchSpecialChar.sub(' ', line))
return RetLines
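ConvertSpecialChar replaces the characters that the UDP XML schema cannot carry with a space, using the character class shown in the regular expression above. A one-function sketch of the same substitution; the class below simply mirrors that regex:

import re

_SPECIAL = re.compile(r"[\x00-\x08\x09\x0b\x0c\x0e-\x1f\x7f-\xff]")

def sanitize_lines(lines):
    # Replace every disallowed character with a single space, line by line.
    return [_SPECIAL.sub(' ', line) for line in lines]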
## __GetTokenList
@@ -817,7 +817,7 @@ def __GetTokenList(Str):
if TokenOP:
List.append(TokenOP)
TokenOP = ''
if PreChar == '\\' and Char == '\\':
PreChar = ''
else:
@@ -870,61 +870,55 @@ def ConvertNOTEQToNE(Expr):
return ''.join(RetList)
## SplitPcdEntry
#
#
# Split a PCD entry string into Token.CName, PCD value and FFE.
# NOTE: PCD Value and FFE can contain "|" in their expressions, and the INF specification has the rule below:
# When using the characters "|" or "||" in an expression, the expression must be encapsulated in
# When using the characters "|" or "||" in an expression, the expression must be encapsulated in
# open "(" and close ")" parenthesis.
#
#
# @param String An PCD entry string need to be split.
#
# @return List [PcdTokenCName, Value, FFE]
#
# @return List [PcdTokenCName, Value, FFE]
#
def SplitPcdEntry(String):
if not String:
return ['', '',''], False
return ['', '', ''], False
PcdTokenCName = ''
PcdValue = ''
PcdFeatureFlagExp = ''
ValueList = GetSplitValueList(String, "|", 1)
#
# Only contain TokenCName
#
if len(ValueList) == 1:
return [ValueList[0]], True
NewValueList = []
if len(ValueList) == 2:
PcdTokenCName = ValueList[0]
ValueList = GetSplitValueList(ValueList[1], "|")
RemainCount = 0
for Item in ValueList:
ParenthesisCount = 0
for Char in Item:
if Char == "(":
ParenthesisCount += 1
if Char == ")":
ParenthesisCount -= 1
#
# An individual item
#
if RemainCount == 0 and ParenthesisCount >= 0:
NewValueList.append(Item)
RemainCount = ParenthesisCount
elif RemainCount > 0 and RemainCount + ParenthesisCount >= 0:
NewValueList[-1] = NewValueList[-1] + '|' + Item
RemainCount = RemainCount + ParenthesisCount
elif RemainCount > 0 and RemainCount + ParenthesisCount < 0:
#
# ERROR, return
#
return ['', '', ''], False
InQuote = False
InParenthesis = False
StrItem = ''
for StrCh in ValueList[1]:
if StrCh == '"':
InQuote = not InQuote
elif StrCh == '(' or StrCh == ')':
InParenthesis = not InParenthesis
if StrCh == '|':
if not InQuote or not InParenthesis:
NewValueList.append(StrItem.strip())
StrItem = ' '
continue
StrItem += StrCh
NewValueList.append(StrItem.strip())
if len(NewValueList) == 1:
PcdValue = NewValueList[0]
@@ -935,7 +929,7 @@ def SplitPcdEntry(String):
return [PcdTokenCName, PcdValue, PcdFeatureFlagExp], True
else:
return ['', '', ''], False
return ['', '', ''], False
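The rewritten SplitPcdEntry walks the value character by character so that '|' inside double quotes or parentheses no longer ends a field. A reduced sketch of that idea; it tracks parenthesis nesting with a counter instead of the boolean toggle used above, so it is an approximation rather than the committed logic:

def split_top_level(text, sep='|'):
    # Split on `sep` only while outside double quotes and outside any
    # parentheses.
    fields, current, in_quote, depth = [], '', False, 0
    for ch in text:
        if ch == '"':
            in_quote = not in_quote
        elif ch == '(' and not in_quote:
            depth += 1
        elif ch == ')' and not in_quote and depth > 0:
            depth -= 1
        if ch == sep and not in_quote and depth == 0:
            fields.append(current.strip())
            current = ''
            continue
        current += ch
    fields.append(current.strip())
    return fields

For example, split_top_level('PcdCName|(1|2)|"a|b"') yields ['PcdCName', '(1|2)', '"a|b"'].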
## Check if two arches matched?

File diff suppressed because it is too large.


@@ -2,7 +2,7 @@
# This is an XML API that uses a syntax similar to XPath, but it is written in
# standard python so that no extra python packages are required to use it.
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
@@ -22,6 +22,7 @@ XmlRoutines
#
import xml.dom.minidom
import re
import codecs
from Logger.ToolError import PARSER_ERROR
import Logger.Log as Logger
@@ -219,7 +220,7 @@ def XmlNodeName(Dom):
#
def XmlParseFile(FileName):
try:
XmlFile = open(FileName)
XmlFile = codecs.open(FileName, 'rb')
Dom = xml.dom.minidom.parse(XmlFile)
XmlFile.close()
return Dom
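XmlParseFile now opens the document with codecs.open(FileName, 'rb') so minidom receives raw bytes and resolves the encoding from the XML declaration itself. A minimal sketch of that pattern; error handling is reduced to returning None instead of raising a parser error:

import codecs
import xml.dom.minidom

def parse_xml_file(file_name):
    # Open in binary mode and let minidom honour the declared encoding.
    try:
        xml_file = codecs.open(file_name, 'rb')
        try:
            return xml.dom.minidom.parse(xml_file)
        finally:
            xml_file.close()
    except Exception:
        return None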