BaseTools: Clean up source files

1. Do not use tab characters
2. No trailing white space at the end of any line
3. All files must end with CRLF

Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Liming Gao <liming.gao@intel.com>
Cc: Yonghong Zhu <yonghong.zhu@intel.com>
Reviewed-by: Yonghong Zhu <yonghong.zhu@intel.com>

Author: Liming Gao
Date:   2018-07-05 17:40:04 +08:00
Parent: 39456d00f3
Commit: f7496d7173
289 changed files with 10647 additions and 10647 deletions
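
The three rules above can be checked mechanically. The following is a minimal, hypothetical Python sketch (not part of this commit, and not necessarily the tooling the author used) that flags tab characters, trailing white space, and a missing CRLF at the end of a file; file paths are taken from the command line.

    #!/usr/bin/env python3
    # Hypothetical checker for the three cleanup rules in the commit message:
    #   1. no tab characters, 2. no trailing white space, 3. file ends with CRLF.
    import sys

    def check_file(path):
        issues = []
        with open(path, 'rb') as handle:
            data = handle.read()
        # splitlines() drops the line endings, so rstrip() below only sees
        # trailing spaces/tabs, never the CR/LF pair itself.
        for number, line in enumerate(data.splitlines(), start=1):
            if b'\t' in line:
                issues.append('%s:%d: tab character' % (path, number))
            if line != line.rstrip():
                issues.append('%s:%d: trailing white space' % (path, number))
        if data and not data.endswith(b'\r\n'):
            issues.append('%s: does not end with CRLF' % path)
        return issues

    if __name__ == '__main__':
        problems = []
        for name in sys.argv[1:]:
            problems.extend(check_file(name))
        print('\n'.join(problems) if problems else 'clean')

Run against the 289 files touched here, such a check should report findings only for the pre-commit versions, assuming the cleanup was complete.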


@@ -3,9 +3,9 @@
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
@@ -39,21 +39,21 @@ from Library.Misc import GetLocalValue
## GenTailCommentLines
#
# @param TailCommentLines: the tail comment lines that need to be generated
# @param LeadingSpaceNum: the number of leading space needed for non-first
# @param LeadingSpaceNum: the number of leading space needed for non-first
# line tail comment
#
#
def GenTailCommentLines (TailCommentLines, LeadingSpaceNum = 0):
TailCommentLines = TailCommentLines.rstrip(END_OF_LINE)
CommentStr = TAB_SPACE_SPLIT*2 + TAB_SPECIAL_COMMENT + TAB_SPACE_SPLIT + \
(END_OF_LINE + LeadingSpaceNum * TAB_SPACE_SPLIT + TAB_SPACE_SPLIT*2 + TAB_SPECIAL_COMMENT + \
TAB_SPACE_SPLIT).join(GetSplitValueList(TailCommentLines, END_OF_LINE))
return CommentStr
## GenGenericComment
#
# @param CommentLines: Generic comment Text, maybe Multiple Lines
#
#
def GenGenericComment (CommentLines):
if not CommentLines:
return ''
@@ -68,8 +68,8 @@ def GenGenericComment (CommentLines):
# and for line with only <EOL>, '#\n' will be generated instead of '# \n'
#
# @param CommentLines: Generic comment Text, maybe Multiple Lines
# @return CommentStr: Generated comment line
#
# @return CommentStr: Generated comment line
#
def GenGenericCommentF (CommentLines, NumOfPound=1, IsPrompt=False, IsInfLibraryClass=False):
if not CommentLines:
return ''
@@ -104,7 +104,7 @@ def GenGenericCommentF (CommentLines, NumOfPound=1, IsPrompt=False, IsInfLibrary
CommentStr += TAB_COMMENT_SPLIT * NumOfPound + TAB_SPACE_SPLIT * 16 + Line + END_OF_LINE
else:
CommentStr += TAB_COMMENT_SPLIT * NumOfPound + TAB_SPACE_SPLIT + Line + END_OF_LINE
return CommentStr
@@ -112,7 +112,7 @@ def GenGenericCommentF (CommentLines, NumOfPound=1, IsPrompt=False, IsInfLibrary
#
# Generate Header comment sections
#
# @param Abstract One line of abstract
# @param Abstract One line of abstract
# @param Description multiple lines of Description
# @param Copyright possible multiple copyright lines
# @param License possible multiple license lines
@@ -148,9 +148,9 @@ def GenHeaderCommentSection(Abstract, Description, Copyright, License, IsBinaryH
Content += CommChar + TAB_SPACE_SPLIT + ('\r\n' + CommChar + TAB_SPACE_SPLIT).join(GetSplitValueList\
(Description, '\n'))
Content += '\r\n' + CommChar + '\r\n'
#
# There is no '#\n' line to separate multiple copyright lines in code base
# There is no '#\n' line to separate multiple copyright lines in code base
#
if Copyright:
Copyright = Copyright.rstrip('\r\n')
@@ -163,12 +163,12 @@ def GenHeaderCommentSection(Abstract, Description, Copyright, License, IsBinaryH
Content += CommChar + TAB_SPACE_SPLIT + ('\r\n' + CommChar + TAB_SPACE_SPLIT).join(GetSplitValueList\
(License, '\n'))
Content += '\r\n' + CommChar + '\r\n'
if CommChar == TAB_COMMENT_EDK1_SPLIT:
Content += CommChar + TAB_SPACE_SPLIT + TAB_STAR + TAB_COMMENT_EDK1_END + '\r\n'
else:
Content += CommChar * 2 + '\r\n'
return Content
@@ -177,11 +177,11 @@ def GenHeaderCommentSection(Abstract, Description, Copyright, License, IsBinaryH
#
# @param Usage: Usage type
# @param TailCommentText: Comment text for tail comment
#
#
def GenInfPcdTailComment (Usage, TailCommentText):
if (Usage == ITEM_UNDEFINED) and (not TailCommentText):
return ''
CommentLine = TAB_SPACE_SPLIT.join([Usage, TailCommentText])
return GenTailCommentLines(CommentLine)
@@ -190,16 +190,16 @@ def GenInfPcdTailComment (Usage, TailCommentText):
#
# @param Usage: Usage type
# @param TailCommentText: Comment text for tail comment
#
#
def GenInfProtocolPPITailComment (Usage, Notify, TailCommentText):
if (not Notify) and (Usage == ITEM_UNDEFINED) and (not TailCommentText):
return ''
if Notify:
CommentLine = USAGE_ITEM_NOTIFY + " ## "
else:
CommentLine = ''
CommentLine += TAB_SPACE_SPLIT.join([Usage, TailCommentText])
return GenTailCommentLines(CommentLine)
@@ -208,39 +208,39 @@ def GenInfProtocolPPITailComment (Usage, Notify, TailCommentText):
#
# @param Usage: Usage type
# @param TailCommentText: Comment text for tail comment
#
#
def GenInfGuidTailComment (Usage, GuidTypeList, VariableName, TailCommentText):
GuidType = GuidTypeList[0]
if (Usage == ITEM_UNDEFINED) and (GuidType == ITEM_UNDEFINED) and \
(not TailCommentText):
return ''
FirstLine = Usage + " ## " + GuidType
FirstLine = Usage + " ## " + GuidType
if GuidType == TAB_INF_GUIDTYPE_VAR:
FirstLine += ":" + VariableName
CommentLine = TAB_SPACE_SPLIT.join([FirstLine, TailCommentText])
return GenTailCommentLines(CommentLine)
## GenDecGuidTailComment
#
# @param SupModuleList: Supported module type list
#
def GenDecTailComment (SupModuleList):
#
def GenDecTailComment (SupModuleList):
CommentLine = TAB_SPACE_SPLIT.join(SupModuleList)
return GenTailCommentLines(CommentLine)
## _GetHelpStr
# get HelpString from a list of HelpTextObject, the priority refer to
# get HelpString from a list of HelpTextObject, the priority refer to
# related HLD
#
# @param HelpTextObjList: List of HelpTextObject
#
#
# @return HelpStr: the help text string found, '' means no help text found
#
def _GetHelpStr(HelpTextObjList):
ValueList = []
ValueList = []
for HelpObj in HelpTextObjList:
ValueList.append((HelpObj.GetLang(), HelpObj.GetString()))
return GetLocalValue(ValueList, True)


@@ -3,9 +3,9 @@
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
@@ -69,7 +69,7 @@ def ParseHeaderCommentSection(CommentList, FileName = None, IsBinaryHeader = Fal
else:
STR_HEADER_COMMENT_START = "@file"
HeaderCommentStage = HEADER_COMMENT_NOT_STARTED
#
# first find the last copyright line
#
@@ -79,24 +79,24 @@ def ParseHeaderCommentSection(CommentList, FileName = None, IsBinaryHeader = Fal
if _IsCopyrightLine(Line):
Last = Index
break
for Item in CommentList:
Line = Item[0]
LineNo = Item[1]
if not Line.startswith(TAB_COMMENT_SPLIT) and Line:
Logger.Error("\nUPT", FORMAT_INVALID, ST.ERR_INVALID_COMMENT_FORMAT, FileName, Item[1])
Comment = CleanString2(Line)[1]
Comment = Comment.strip()
#
# if there are blank lines between License or Description, keep them as they would be
# if there are blank lines between License or Description, keep them as they would be
# indication of different block; or in the position that Abstract should be, also keep it
# as it indicates that no abstract
#
if not Comment and HeaderCommentStage not in [HEADER_COMMENT_LICENSE, \
HEADER_COMMENT_DESCRIPTION, HEADER_COMMENT_ABSTRACT]:
continue
if HeaderCommentStage == HEADER_COMMENT_NOT_STARTED:
if Comment.startswith(STR_HEADER_COMMENT_START):
HeaderCommentStage = HEADER_COMMENT_ABSTRACT
@@ -114,20 +114,20 @@ def ParseHeaderCommentSection(CommentList, FileName = None, IsBinaryHeader = Fal
ValidateCopyright(Result, ST.WRN_INVALID_COPYRIGHT, FileName, LineNo, ErrMsg)
Copyright += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_COPYRIGHT
else:
else:
Abstract += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_DESCRIPTION
elif HeaderCommentStage == HEADER_COMMENT_DESCRIPTION:
#
# in case there is no description
#
#
if _IsCopyrightLine(Comment):
Result, ErrMsg = _ValidateCopyright(Comment)
ValidateCopyright(Result, ST.WRN_INVALID_COPYRIGHT, FileName, LineNo, ErrMsg)
Copyright += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_COPYRIGHT
else:
Description += Comment + EndOfLine
Description += Comment + EndOfLine
elif HeaderCommentStage == HEADER_COMMENT_COPYRIGHT:
if _IsCopyrightLine(Comment):
Result, ErrMsg = _ValidateCopyright(Comment)
@@ -136,23 +136,23 @@ def ParseHeaderCommentSection(CommentList, FileName = None, IsBinaryHeader = Fal
else:
#
# Contents after copyright line are license, those non-copyright lines in between
# copyright line will be discarded
# copyright line will be discarded
#
if LineNo > Last:
if License:
License += EndOfLine
License += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_LICENSE
HeaderCommentStage = HEADER_COMMENT_LICENSE
else:
if not Comment and not License:
continue
License += Comment + EndOfLine
return Abstract.strip(), Description.strip(), Copyright.strip(), License.strip()
## _IsCopyrightLine
# check whether current line is copyright line, the criteria is whether there is case insensitive keyword "Copyright"
# followed by zero or more white space characters followed by a "(" character
# check whether current line is copyright line, the criteria is whether there is case insensitive keyword "Copyright"
# followed by zero or more white space characters followed by a "(" character
#
# @param LineContent: the line need to be checked
# @return: True if current line is copyright line, False else
@@ -160,7 +160,7 @@ def ParseHeaderCommentSection(CommentList, FileName = None, IsBinaryHeader = Fal
def _IsCopyrightLine (LineContent):
LineContent = LineContent.upper()
Result = False
ReIsCopyrightRe = re.compile(r"""(^|\s)COPYRIGHT *\(""", re.DOTALL)
if ReIsCopyrightRe.search(LineContent):
Result = True
@@ -169,23 +169,23 @@ def _IsCopyrightLine (LineContent):
## ParseGenericComment
#
# @param GenericComment: Generic comment list, element of
# @param GenericComment: Generic comment list, element of
# (CommentLine, LineNum)
# @param ContainerFile: Input value for filename of Dec file
#
#
def ParseGenericComment (GenericComment, ContainerFile=None, SkipTag=None):
if ContainerFile:
pass
HelpTxt = None
HelpStr = ''
HelpTxt = None
HelpStr = ''
for Item in GenericComment:
CommentLine = Item[0]
Comment = CleanString2(CommentLine)[1]
if SkipTag is not None and Comment.startswith(SkipTag):
Comment = Comment.replace(SkipTag, '', 1)
HelpStr += Comment + '\n'
if HelpStr:
HelpTxt = TextObject()
if HelpStr.endswith('\n') and not HelpStr.endswith('\n\n') and HelpStr != '\n':
@@ -196,22 +196,22 @@ def ParseGenericComment (GenericComment, ContainerFile=None, SkipTag=None):
## ParsePcdErrorCode
#
# @param Value: original ErrorCode value
# @param Value: original ErrorCode value
# @param ContainerFile: Input value for filename of Dec file
# @param LineNum: Line Num
#
def ParsePcdErrorCode (Value = None, ContainerFile = None, LineNum = None):
try:
# @param LineNum: Line Num
#
def ParsePcdErrorCode (Value = None, ContainerFile = None, LineNum = None):
try:
if Value.strip().startswith((TAB_HEX_START, TAB_CAPHEX_START)):
Base = 16
else:
Base = 10
ErrorCode = long(Value, Base)
if ErrorCode > PCD_ERR_CODE_MAX_SIZE or ErrorCode < 0:
Logger.Error('Parser',
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
"The format %s of ErrorCode is not valid, should be UNIT32 type or long type" % Value,
File = ContainerFile,
File = ContainerFile,
Line = LineNum)
#
# To delete the tailing 'L'
@@ -220,27 +220,27 @@ def ParsePcdErrorCode (Value = None, ContainerFile = None, LineNum = None):
except ValueError as XStr:
if XStr:
pass
Logger.Error('Parser',
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
"The format %s of ErrorCode is not valid, should be UNIT32 type or long type" % Value,
File = ContainerFile,
File = ContainerFile,
Line = LineNum)
## ParseDecPcdGenericComment
#
# @param GenericComment: Generic comment list, element of (CommentLine,
# @param GenericComment: Generic comment list, element of (CommentLine,
# LineNum)
# @param ContainerFile: Input value for filename of Dec file
#
def ParseDecPcdGenericComment (GenericComment, ContainerFile, TokenSpaceGuidCName, CName, MacroReplaceDict):
HelpStr = ''
#
def ParseDecPcdGenericComment (GenericComment, ContainerFile, TokenSpaceGuidCName, CName, MacroReplaceDict):
HelpStr = ''
PromptStr = ''
PcdErr = None
PcdErrList = []
ValidValueNum = 0
ValidRangeNum = 0
ExpressionNum = 0
for (CommentLine, LineNum) in GenericComment:
Comment = CleanString2(CommentLine)[1]
#
@@ -252,13 +252,13 @@ def ParseDecPcdGenericComment (GenericComment, ContainerFile, TokenSpaceGuidCNam
if MatchedStr:
Macro = MatchedStr.strip().lstrip('$(').rstrip(')').strip()
if Macro in MacroReplaceDict:
Comment = Comment.replace(MatchedStr, MacroReplaceDict[Macro])
Comment = Comment.replace(MatchedStr, MacroReplaceDict[Macro])
if Comment.startswith(TAB_PCD_VALIDRANGE):
if ValidValueNum > 0 or ExpressionNum > 0:
Logger.Error('Parser',
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
ST.WRN_MULTI_PCD_RANGES,
File = ContainerFile,
File = ContainerFile,
Line = LineNum)
else:
PcdErr = PcdErrorObject()
@@ -280,21 +280,21 @@ def ParseDecPcdGenericComment (GenericComment, ContainerFile, TokenSpaceGuidCNam
else:
Logger.Error("Parser",
FORMAT_NOT_SUPPORTED,
Cause,
ContainerFile,
Cause,
ContainerFile,
LineNum)
elif Comment.startswith(TAB_PCD_VALIDLIST):
if ValidRangeNum > 0 or ExpressionNum > 0:
Logger.Error('Parser',
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
ST.WRN_MULTI_PCD_RANGES,
File = ContainerFile,
File = ContainerFile,
Line = LineNum)
elif ValidValueNum > 0:
Logger.Error('Parser',
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
ST.WRN_MULTI_PCD_VALIDVALUE,
File = ContainerFile,
File = ContainerFile,
Line = LineNum)
else:
PcdErr = PcdErrorObject()
@@ -317,15 +317,15 @@ def ParseDecPcdGenericComment (GenericComment, ContainerFile, TokenSpaceGuidCNam
else:
Logger.Error("Parser",
FORMAT_NOT_SUPPORTED,
Cause,
ContainerFile,
Cause,
ContainerFile,
LineNum)
elif Comment.startswith(TAB_PCD_EXPRESSION):
if ValidRangeNum > 0 or ValidValueNum > 0:
Logger.Error('Parser',
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
ST.WRN_MULTI_PCD_RANGES,
File = ContainerFile,
File = ContainerFile,
Line = LineNum)
else:
PcdErr = PcdErrorObject()
@@ -344,24 +344,24 @@ def ParseDecPcdGenericComment (GenericComment, ContainerFile, TokenSpaceGuidCNam
else:
PcdErr.SetExpression(Expression)
PcdErrList.append(PcdErr)
else:
else:
Logger.Error("Parser",
FORMAT_NOT_SUPPORTED,
Cause,
ContainerFile,
LineNum)
Cause,
ContainerFile,
LineNum)
elif Comment.startswith(TAB_PCD_PROMPT):
if PromptStr:
Logger.Error('Parser',
Logger.Error('Parser',
FORMAT_NOT_SUPPORTED,
ST.WRN_MULTI_PCD_PROMPT,
File = ContainerFile,
File = ContainerFile,
Line = LineNum)
PromptStr = Comment.replace(TAB_PCD_PROMPT, "", 1).strip()
else:
if Comment:
HelpStr += Comment + '\n'
#
# remove the last EOL if the comment is of format 'FOO\n'
#
@@ -384,9 +384,9 @@ def ParseDecPcdTailComment (TailCommentList, ContainerFile):
LineNum = TailCommentList[0][1]
Comment = TailComment.lstrip(" #")
ReFindFirstWordRe = re.compile(r"""^([^ #]*)""", re.DOTALL)
#
# get first word and compare with SUP_MODULE_LIST
#
@@ -398,7 +398,7 @@ def ParseDecPcdTailComment (TailCommentList, ContainerFile):
# parse line, it must have supported module type specified
#
if Comment.find(TAB_COMMENT_SPLIT) == -1:
Comment += TAB_COMMENT_SPLIT
Comment += TAB_COMMENT_SPLIT
SupMode, HelpStr = GetSplitValueList(Comment, TAB_COMMENT_SPLIT, 1)
SupModuleList = []
for Mod in GetSplitValueList(SupMode, TAB_SPACE_SPLIT):
@@ -407,8 +407,8 @@ def ParseDecPcdTailComment (TailCommentList, ContainerFile):
elif Mod not in SUP_MODULE_LIST:
Logger.Error("UPT",
FORMAT_INVALID,
ST.WRN_INVALID_MODULE_TYPE%Mod,
ContainerFile,
ST.WRN_INVALID_MODULE_TYPE%Mod,
ContainerFile,
LineNum)
else:
SupModuleList.append(Mod)
@@ -417,15 +417,15 @@ def ParseDecPcdTailComment (TailCommentList, ContainerFile):
## _CheckListExpression
#
# @param Expression: Pcd value list expression
# @param Expression: Pcd value list expression
#
def _CheckListExpression(Expression):
ListExpr = ''
if TAB_VALUE_SPLIT in Expression:
ListExpr = Expression[Expression.find(TAB_VALUE_SPLIT)+1:]
ListExpr = Expression[Expression.find(TAB_VALUE_SPLIT)+1:]
else:
ListExpr = Expression
return IsValidListExpr(ListExpr)
## _CheckExpreesion
@@ -443,14 +443,14 @@ def _CheckExpression(Expression):
## _CheckRangeExpression
#
# @param Expression: Pcd range expression
#
#
def _CheckRangeExpression(Expression):
RangeExpr = ''
if TAB_VALUE_SPLIT in Expression:
RangeExpr = Expression[Expression.find(TAB_VALUE_SPLIT)+1:]
else:
RangeExpr = Expression
return IsValidRangeExpr(RangeExpr)
## ValidateCopyright
@@ -459,28 +459,28 @@ def _CheckRangeExpression(Expression):
#
def ValidateCopyright(Result, ErrType, FileName, LineNo, ErrMsg):
if not Result:
Logger.Warn("\nUPT", ErrType, FileName, LineNo, ErrMsg)
Logger.Warn("\nUPT", ErrType, FileName, LineNo, ErrMsg)
## _ValidateCopyright
#
# @param Line: Line that contains copyright information, # stripped
#
#
# @retval Result: True if line is conformed to Spec format, False else
# @retval ErrMsg: the detailed error description
#
#
def _ValidateCopyright(Line):
if Line:
pass
Result = True
ErrMsg = ''
return Result, ErrMsg
def GenerateTokenList (Comment):
#
# Tokenize Comment using '#' and ' ' as token seperators
#
RelplacedComment = None
RelplacedComment = None
while Comment != RelplacedComment:
RelplacedComment = Comment
Comment = Comment.replace('##', '#').replace(' ', ' ').replace(' ', '#').strip('# ')
@@ -500,25 +500,25 @@ def ParseComment (Comment, UsageTokens, TypeTokens, RemoveTokens, ParseVariable)
Usage = None
Type = None
String = None
Comment = Comment[0]
NumTokens = 2
NumTokens = 2
if ParseVariable:
#
# Remove white space around first instance of ':' from Comment if 'Variable'
#
# Remove white space around first instance of ':' from Comment if 'Variable'
# is in front of ':' and Variable is the 1st or 2nd token in Comment.
#
List = Comment.split(':', 1)
List = Comment.split(':', 1)
if len(List) > 1:
SubList = GenerateTokenList (List[0].strip())
if len(SubList) in [1, 2] and SubList[-1] == 'Variable':
if List[1].strip().find('L"') == 0:
if List[1].strip().find('L"') == 0:
Comment = List[0].strip() + ':' + List[1].strip()
#
#
# Remove first instance of L"<VariableName> from Comment and put into String
# if and only if L"<VariableName>" is the 1st token, the 2nd token. Or
# if and only if L"<VariableName>" is the 1st token, the 2nd token. Or
# L"<VariableName>" is the third token immediately following 'Variable:'.
#
End = -1
@@ -533,25 +533,25 @@ def ParseComment (Comment, UsageTokens, TypeTokens, RemoveTokens, ParseVariable)
End = String[2:].find('"')
if End >= 0:
SubList = GenerateTokenList (Comment[:Start])
if len(SubList) < 2:
if len(SubList) < 2:
Comment = Comment[:Start] + String[End + 3:]
String = String[:End + 3]
Type = 'Variable'
NumTokens = 1
NumTokens = 1
#
# Initialze HelpText to Comment.
# Initialze HelpText to Comment.
# Content will be remove from HelpText as matching tokens are found
#
#
HelpText = Comment
#
# Tokenize Comment using '#' and ' ' as token seperators
#
List = GenerateTokenList (Comment)
#
# Search first two tokens for Usage and Type and remove any matching tokens
# Search first two tokens for Usage and Type and remove any matching tokens
# from HelpText
#
for Token in List[0:NumTokens]:
@@ -563,39 +563,39 @@ def ParseComment (Comment, UsageTokens, TypeTokens, RemoveTokens, ParseVariable)
if Type is None and Token in TypeTokens:
Type = TypeTokens[Token]
HelpText = HelpText.replace(Token, '')
if Usage is not None:
if Usage is not None:
for Token in List[0:NumTokens]:
if Token in RemoveTokens:
HelpText = HelpText.replace(Token, '')
#
# If no Usage token is present and set Usage to UNDEFINED
#
#
if Usage is None:
Usage = 'UNDEFINED'
#
# If no Type token is present and set Type to UNDEFINED
#
#
if Type is None:
Type = 'UNDEFINED'
#
# If Type is not 'Variable:', then set String to None
#
#
if Type != 'Variable':
String = None
String = None
#
# Strip ' ' and '#' from the beginning of HelpText
# If HelpText is an empty string after all parsing is
# If HelpText is an empty string after all parsing is
# complete then set HelpText to None
#
#
HelpText = HelpText.lstrip('# ')
if HelpText == '':
HelpText = None
#
# Return parsing results
#
return Usage, Type, String, HelpText
#
return Usage, Type, String, HelpText


@@ -1,11 +1,11 @@
## @file
# This file is used to define class for data type structure
#
# Copyright (c) 2011 - 2016, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
@@ -64,13 +64,13 @@ USAGE_ITEM_UNDEFINED = 'UNDEFINED'
USAGE_CONSUMES_LIST = [USAGE_ITEM_CONSUMES,
'CONSUMED',
'ALWAYS_CONSUMED',
'ALWAYS_CONSUMES'
'ALWAYS_CONSUMES'
]
USAGE_PRODUCES_LIST = [USAGE_ITEM_PRODUCES,
'PRODUCED',
'ALWAYS_PRODUCED',
'ALWAYS_PRODUCES'
'ALWAYS_PRODUCES'
]
USAGE_SOMETIMES_PRODUCES_LIST = [USAGE_ITEM_SOMETIMES_PRODUCES,
@@ -94,7 +94,7 @@ TAB_STR_TOKENERR = 'ERR'
#
# Dictionary of usage tokens and their synonmys
#
#
ALL_USAGE_TOKENS = {
"PRODUCES" : "PRODUCES",
"PRODUCED" : "PRODUCES",
@@ -109,20 +109,20 @@ ALL_USAGE_TOKENS = {
"SOMETIMES_CONSUMES" : "SOMETIMES_CONSUMES",
"SOMETIMES_CONSUMED" : "SOMETIMES_CONSUMES",
"SOMETIME_CONSUMES" : "SOMETIMES_CONSUMES",
"UNDEFINED" : "UNDEFINED"
"UNDEFINED" : "UNDEFINED"
}
PROTOCOL_USAGE_TOKENS = {
"TO_START" : "TO_START",
"BY_START" : "BY_START"
}
PROTOCOL_USAGE_TOKENS.update (ALL_USAGE_TOKENS)
#
# Dictionary of GUID type tokens
#
GUID_TYPE_TOKENS = {
#
GUID_TYPE_TOKENS = {
"Event" : "Event",
"File" : "File",
"FV" : "FV",
@@ -134,31 +134,31 @@ GUID_TYPE_TOKENS = {
"Hob:" : "HOB",
"SystemTable" : "SystemTable",
"TokenSpaceGuid" : "TokenSpaceGuid",
"UNDEFINED" : "UNDEFINED"
"UNDEFINED" : "UNDEFINED"
}
#
# Dictionary of Protocol Notify tokens and their synonyms
#
PROTOCOL_NOTIFY_TOKENS = {
#
PROTOCOL_NOTIFY_TOKENS = {
"NOTIFY" : "NOTIFY",
"PROTOCOL_NOTIFY" : "NOTIFY",
"UNDEFINED" : "UNDEFINED"
"UNDEFINED" : "UNDEFINED"
}
#
# Dictionary of PPI Notify tokens and their synonyms
#
PPI_NOTIFY_TOKENS = {
#
PPI_NOTIFY_TOKENS = {
"NOTIFY" : "NOTIFY",
"PPI_NOTIFY" : "NOTIFY",
"UNDEFINED" : "UNDEFINED"
"UNDEFINED" : "UNDEFINED"
}
EVENT_TOKENS = {
"EVENT_TYPE_PERIODIC_TIMER" : "EVENT_TYPE_PERIODIC_TIMER",
"EVENT_TYPE_RELATIVE_TIMER" : "EVENT_TYPE_RELATIVE_TIMER",
"UNDEFINED" : "UNDEFINED"
"UNDEFINED" : "UNDEFINED"
}
BOOTMODE_TOKENS = {
@@ -182,16 +182,16 @@ BOOTMODE_TOKENS = {
"RECOVERY_S4_RESUME" : "RECOVERY_S4_RESUME",
"RECOVERY_S5_RESUME" : "RECOVERY_S5_RESUME",
"RECOVERY_FLASH_UPDATE" : "RECOVERY_FLASH_UPDATE",
"UNDEFINED" : "UNDEFINED"
"UNDEFINED" : "UNDEFINED"
}
HOB_TOKENS = {
HOB_TOKENS = {
"PHIT" : "PHIT",
"MEMORY_ALLOCATION" : "MEMORY_ALLOCATION",
"LOAD_PEIM" : "LOAD_PEIM",
"RESOURCE_DESCRIPTOR" : "RESOURCE_DESCRIPTOR",
"FIRMWARE_VOLUME" : "FIRMWARE_VOLUME",
"UNDEFINED" : "UNDEFINED"
"UNDEFINED" : "UNDEFINED"
}
##
@@ -223,22 +223,22 @@ PCD_DIRVER_TYPE_LIST = ["PEI_PCD_DRIVER", "DXE_PCD_DRIVER"]
#
BOOT_MODE_LIST = ["FULL",
"MINIMAL",
"NO_CHANGE",
"DIAGNOSTICS",
"DEFAULT",
"NO_CHANGE",
"DIAGNOSTICS",
"DEFAULT",
"S2_RESUME",
"S3_RESUME",
"S4_RESUME",
"S5_RESUME",
"S3_RESUME",
"S4_RESUME",
"S5_RESUME",
"FLASH_UPDATE",
"RECOVERY_FULL",
"RECOVERY_MINIMAL",
"RECOVERY_FULL",
"RECOVERY_MINIMAL",
"RECOVERY_NO_CHANGE",
"RECOVERY_DIAGNOSTICS",
"RECOVERY_DIAGNOSTICS",
"RECOVERY_DEFAULT",
"RECOVERY_S2_RESUME",
"RECOVERY_S2_RESUME",
"RECOVERY_S3_RESUME",
"RECOVERY_S4_RESUME",
"RECOVERY_S4_RESUME",
"RECOVERY_S5_RESUME",
"RECOVERY_FLASH_UPDATE"]
@@ -251,9 +251,9 @@ EVENT_TYPE_LIST = ["EVENT_TYPE_PERIODIC_TIMER",
##
# Hob Type List Items
#
HOB_TYPE_LIST = ["PHIT",
HOB_TYPE_LIST = ["PHIT",
"MEMORY_ALLOCATION",
"RESOURCE_DESCRIPTOR",
"RESOURCE_DESCRIPTOR",
"FIRMWARE_VOLUME",
"LOAD_PEIM"]
@@ -290,19 +290,19 @@ BINARY_FILE_TYPE_LIST = ["PE32", "PIC", "TE", "DXE_DEPEX", "VER", "UI", "COMPAT1
BINARY_FILE_TYPE_LIST_IN_UDP = \
["GUID", "FREEFORM",
"UEFI_IMAGE", "PE32", "PIC",
"PEI_DEPEX",
"PEI_DEPEX",
"DXE_DEPEX",
"SMM_DEPEX",
"FV", "TE",
"BIN", "VER", "UI"
"BIN", "VER", "UI"
]
SUBTYPE_GUID_BINARY_FILE_TYPE = "FREEFORM"
##
# Possible values for COMPONENT_TYPE, and their descriptions, are listed in
# the table,
# "Component (module) Types." For each component, the BASE_NAME and
# COMPONENT_TYPE
# Possible values for COMPONENT_TYPE, and their descriptions, are listed in
# the table,
# "Component (module) Types." For each component, the BASE_NAME and
# COMPONENT_TYPE
# are required. The COMPONENT_TYPE definition is case sensitive.
#
COMPONENT_TYPE_LIST = [
@@ -436,7 +436,7 @@ BINARY_FILE_TYPE_FV = 'FV'
BINARY_FILE_TYPE_UI_LIST = [BINARY_FILE_TYPE_UNI_UI,
BINARY_FILE_TYPE_SEC_UI,
BINARY_FILE_TYPE_UI
]
]
BINARY_FILE_TYPE_VER_LIST = [BINARY_FILE_TYPE_UNI_VER,
BINARY_FILE_TYPE_SEC_VER,
BINARY_FILE_TYPE_VER
@@ -712,7 +712,7 @@ TAB_INF_DEFINES_FV_EXT = 'FV_EXT'
TAB_INF_DEFINES_SOURCE_FV = 'SOURCE_FV'
TAB_INF_DEFINES_PACKAGE = 'PACKAGE'
TAB_INF_DEFINES_VERSION_NUMBER = 'VERSION_NUMBER'
TAB_INF_DEFINES_VERSION = 'VERSION'
TAB_INF_DEFINES_VERSION = 'VERSION'
TAB_INF_DEFINES_VERSION_STRING = 'VERSION_STRING'
TAB_INF_DEFINES_PCD_IS_DRIVER = 'PCD_IS_DRIVER'
TAB_INF_DEFINES_TIANO_EDK1_FLASHMAP_H = 'TIANO_EDK1_FLASHMAP_H'
@@ -720,9 +720,9 @@ TAB_INF_DEFINES_ENTRY_POINT = 'ENTRY_POINT'
TAB_INF_DEFINES_UNLOAD_IMAGE = 'UNLOAD_IMAGE'
TAB_INF_DEFINES_CONSTRUCTOR = 'CONSTRUCTOR'
TAB_INF_DEFINES_DESTRUCTOR = 'DESTRUCTOR'
TAB_INF_DEFINES_PCI_VENDOR_ID = 'PCI_VENDOR_ID'
TAB_INF_DEFINES_PCI_DEVICE_ID = 'PCI_DEVICE_ID'
TAB_INF_DEFINES_PCI_CLASS_CODE = 'PCI_CLASS_CODE'
TAB_INF_DEFINES_PCI_VENDOR_ID = 'PCI_VENDOR_ID'
TAB_INF_DEFINES_PCI_DEVICE_ID = 'PCI_DEVICE_ID'
TAB_INF_DEFINES_PCI_CLASS_CODE = 'PCI_CLASS_CODE'
TAB_INF_DEFINES_PCI_REVISION = 'PCI_REVISION'
TAB_INF_DEFINES_PCI_COMPRESS = 'PCI_COMPRESS'
TAB_INF_DEFINES_DEFINE = 'DEFINE'
@@ -819,12 +819,12 @@ TAB_IF_EXIST = '!if exist'
TAB_UNKNOWN = 'UNKNOWN'
#
# Header section (virtual section for abstract, description, copyright,
# Header section (virtual section for abstract, description, copyright,
# license)
#
TAB_HEADER = 'Header'
TAB_HEADER_ABSTRACT = 'Abstract'
TAB_HEADER_DESCRIPTION = 'Description'
TAB_HEADER_DESCRIPTION = 'Description'
TAB_HEADER_COPYRIGHT = 'Copyright'
TAB_HEADER_LICENSE = 'License'
TAB_BINARY_HEADER_IDENTIFIER = 'BinaryHeader'
@@ -833,7 +833,7 @@ TAB_BINARY_HEADER_USERID = 'TianoCore'
#
# Build database path
#
DATABASE_PATH = ":memory:"
DATABASE_PATH = ":memory:"
#
# used by ECC
#
@@ -855,7 +855,7 @@ TAB_DEPENDENCY_EXPRESSION_FILE = "DEPENDENCY-EXPRESSION-FILE"
TAB_UNKNOWN_FILE = "UNKNOWN-TYPE-FILE"
TAB_DEFAULT_BINARY_FILE = "_BINARY_FILE_"
#
# used to indicate the state of processing header comment section of dec,
# used to indicate the state of processing header comment section of dec,
# inf files
#
HEADER_COMMENT_NOT_STARTED = -1
@@ -953,6 +953,6 @@ TOOL_FAMILY_LIST = ["MSFT",
TYPE_HOB_SECTION = 'HOB'
TYPE_EVENT_SECTION = 'EVENT'
TYPE_BOOTMODE_SECTION = 'BOOTMODE'
TYPE_BOOTMODE_SECTION = 'BOOTMODE'
PCD_ERR_CODE_MAX_SIZE = 4294967295


@@ -1,11 +1,11 @@
## @file
# This file is used to check PCD logical expression
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
@@ -47,7 +47,7 @@ def IsValidBareCString(String):
and (IntChar < 0x23 or IntChar > 0x7e):
return False
PreChar = Char
# Last char cannot be \ if PreChar is not \
if LastChar == '\\' and PreChar == LastChar:
return False
@@ -83,7 +83,7 @@ class _ExprBase:
self.Token = Token
self.Index = 0
self.Len = len(Token)
## SkipWhitespace
#
def SkipWhitespace(self):
@@ -91,14 +91,14 @@ class _ExprBase:
if Char not in ' \t':
break
self.Index += 1
## IsCurrentOp
#
# @param OpList: option list
#
# @param OpList: option list
#
def IsCurrentOp(self, OpList):
self.SkipWhitespace()
LetterOp = ["EQ", "NE", "GE", "LE", "GT", "LT", "NOT", "and", "AND",
LetterOp = ["EQ", "NE", "GE", "LE", "GT", "LT", "NOT", "and", "AND",
"or", "OR", "XOR"]
OpMap = {
'|' : '|',
@@ -107,11 +107,11 @@ class _ExprBase:
'>' : '=',
'<' : '='
}
for Operator in OpList:
if not self.Token[self.Index:].startswith(Operator):
continue
self.Index += len(Operator)
Char = self.Token[self.Index : self.Index + 1]
@@ -119,36 +119,36 @@ class _ExprBase:
or (Operator in OpMap and OpMap[Operator] == Char):
self.Index -= len(Operator)
break
return True
return False
## _LogicalExpressionParser
#
# @param _ExprBase: _ExprBase object
#
#
class _LogicalExpressionParser(_ExprBase):
#
# STRINGITEM can only be logical field according to spec
#
STRINGITEM = -1
#
# Evaluate to True or False
#
LOGICAL = 0
REALLOGICAL = 2
#
# Just arithmetic expression
#
ARITH = 1
def __init__(self, Token):
_ExprBase.__init__(self, Token)
self.Parens = 0
def _CheckToken(self, MatchList):
for Match in MatchList:
if Match and Match.start() == 0:
@@ -156,7 +156,7 @@ class _LogicalExpressionParser(_ExprBase):
self.Token[self.Index:self.Index+Match.end()]
):
return False
self.Index += Match.end()
if self.Token[self.Index - 1] == '"':
return True
@@ -164,61 +164,61 @@ class _LogicalExpressionParser(_ExprBase):
self.Token[self.Index:self.Index+1].isalnum():
self.Index -= Match.end()
return False
Token = self.Token[self.Index - Match.end():self.Index]
if Token.strip() in ["EQ", "NE", "GE", "LE", "GT", "LT",
"NOT", "and", "AND", "or", "OR", "XOR"]:
self.Index -= Match.end()
return False
return True
return False
def IsAtomicNumVal(self):
#
# Hex number
#
Match1 = re.compile(self.HEX_PATTERN).match(self.Token[self.Index:])
#
# Number
#
Match2 = re.compile(self.INT_PATTERN).match(self.Token[self.Index:])
#
# Macro
#
Match3 = re.compile(self.MACRO_PATTERN).match(self.Token[self.Index:])
#
# PcdName
#
Match4 = re.compile(self.PCD_PATTERN).match(self.Token[self.Index:])
return self._CheckToken([Match1, Match2, Match3, Match4])
def IsAtomicItem(self):
#
# Macro
#
Match1 = re.compile(self.MACRO_PATTERN).match(self.Token[self.Index:])
#
# PcdName
#
Match2 = re.compile(self.PCD_PATTERN).match(self.Token[self.Index:])
#
# Quoted string
#
Match3 = re.compile(self.QUOTED_PATTERN).\
match(self.Token[self.Index:].replace('\\\\', '//').\
replace('\\\"', '\\\''))
return self._CheckToken([Match1, Match2, Match3])
## A || B
#
def LogicalExpression(self):
@@ -233,12 +233,12 @@ class _LogicalExpressionParser(_ExprBase):
raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
Ret = self.REALLOGICAL
return Ret
def SpecNot(self):
if self.IsCurrentOp(["NOT", "!", "not"]):
return self.SpecNot()
return self.Rel()
## A < B, A > B, A <= B, A >= B
#
def Rel(self):
@@ -252,7 +252,7 @@ class _LogicalExpressionParser(_ExprBase):
raise _ExprError(ST.ERR_EXPR_LOGICAL % self.Token)
Ret = self.REALLOGICAL
return Ret
## A + B, A - B
#
def Expr(self):
@@ -269,7 +269,7 @@ class _LogicalExpressionParser(_ExprBase):
return Ret
## Factor
#
#
def Factor(self):
if self.IsCurrentOp(["("]):
self.Parens += 1
@@ -279,7 +279,7 @@ class _LogicalExpressionParser(_ExprBase):
(self.Token, self.Token[self.Index:]))
self.Parens -= 1
return Ret
if self.IsAtomicItem():
if self.Token[self.Index - 1] == '"':
return self.STRINGITEM
@@ -289,7 +289,7 @@ class _LogicalExpressionParser(_ExprBase):
else:
raise _ExprError(ST.ERR_EXPR_FACTOR % \
(self.Token[self.Index:], self.Token))
## IsValidLogicalExpression
#
def IsValidLogicalExpression(self):
@@ -319,7 +319,7 @@ class _ValidRangeExpressionParser(_ExprBase):
self.INT = 2
self.IsParenHappen = False
self.IsLogicalOpHappen = False
## IsValidRangeExpression
#
def IsValidRangeExpression(self):
@@ -330,12 +330,12 @@ class _ValidRangeExpressionParser(_ExprBase):
return False, ST.ERR_EXPR_RANGE % self.Token
except _ExprError as XExcept:
return False, XExcept.Error
self.SkipWhitespace()
if self.Index != self.Len:
return False, (ST.ERR_EXPR_RANGE % self.Token)
return True, ''
## RangeExpression
#
def RangeExpression(self):
@@ -346,22 +346,22 @@ class _ValidRangeExpressionParser(_ExprBase):
raise _ExprError(ST.ERR_PAREN_NOT_USED % self.Token)
self.IsParenHappen = False
Ret = self.Unary()
if self.IsCurrentOp(['XOR']):
Ret = self.Unary()
return Ret
## Unary
#
def Unary(self):
if self.IsCurrentOp(["NOT"]):
return self.Unary()
return self.ValidRange()
## ValidRange
#
#
def ValidRange(self):
Ret = -1
if self.IsCurrentOp(["("]):
@@ -375,10 +375,10 @@ class _ValidRangeExpressionParser(_ExprBase):
raise _ExprError(ST.ERR_EXPR_RIGHT_PAREN % self.Token)
self.Parens -= 1
return Ret
if self.IsLogicalOpHappen:
raise _ExprError(ST.ERR_PAREN_NOT_USED % self.Token)
if self.IsCurrentOp(["LT", "GT", "LE", "GE", "EQ", "XOR"]):
IntMatch = \
re.compile(self.INT_PATTERN).match(self.Token[self.Index:])
@@ -417,7 +417,7 @@ class _ValidListExpressionParser(_ExprBase):
def __init__(self, Token):
_ExprBase.__init__(self, Token)
self.NUM = 1
def IsValidListExpression(self):
if self.Len == 0:
return False, ST.ERR_EXPR_LIST_EMPTY
@@ -432,7 +432,7 @@ class _ValidListExpressionParser(_ExprBase):
return False, (ST.ERR_EXPR_LIST % self.Token)
return True, ''
def ListExpression(self):
Ret = -1
self.SkipWhitespace()
@@ -444,7 +444,7 @@ class _ValidListExpressionParser(_ExprBase):
raise _ExprError(ST.ERR_EXPR_LIST % self.Token)
return Ret
## _StringTestParser
#
class _StringTestParser(_ExprBase):
@@ -452,7 +452,7 @@ class _StringTestParser(_ExprBase):
_ExprBase.__init__(self, Token)
## IsValidStringTest
#
#
def IsValidStringTest(self):
if self.Len == 0:
return False, ST.ERR_EXPR_EMPTY
@@ -463,7 +463,7 @@ class _StringTestParser(_ExprBase):
return True, ''
## StringItem
#
#
def StringItem(self):
Match1 = re.compile(self.QUOTED_PATTERN)\
.match(self.Token[self.Index:].replace('\\\\', '//')\
@@ -489,7 +489,7 @@ class _StringTestParser(_ExprBase):
(self.Token, self.Token[self.Index:]))
## StringTest
#
#
def StringTest(self):
self.StringItem()
if not self.IsCurrentOp(["==", "EQ", "!=", "NE"]):
@@ -538,7 +538,7 @@ def IsValidRangeExpr(Token):
##
# Check syntax of value list expression token
#
# @param Token: value list expression token
# @param Token: value list expression token
#
def IsValidListExpr(Token):
return _ValidListExpressionParser(Token).IsValidListExpression()
@@ -562,7 +562,7 @@ def IsValidFeatureFlagExp(Token, Flag=False):
if not Valid:
Valid, Cause = IsValidLogicalExpr(Token, Flag)
if not Valid:
return False, Cause
return False, Cause
return True, ""
if __name__ == '__main__':
@@ -570,4 +570,4 @@ if __name__ == '__main__':
print(_LogicalExpressionParser('gCrownBayTokenSpaceGuid.PcdPciDevice1BridgeAddressLE0').IsValidLogicalExpression())


@@ -1,11 +1,11 @@
## @file
# This file is used to define common static strings and global data used by UPT
#
# Copyright (c) 2011 - 2017, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
@@ -16,7 +16,7 @@ GlobalData
'''
#
# The workspace directory
# The workspace directory
#
gWORKSPACE = '.'
gPACKAGE_PATH = None


@@ -3,9 +3,9 @@
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
@@ -52,7 +52,7 @@ from Object.POM.CommonObject import TextObject
from Core.FileHook import __FileHookOpen__
from Common.MultipleWorkspace import MultipleWorkspace as mws
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C
# structure style
#
# @param Guid: The GUID string
@@ -87,7 +87,7 @@ def CheckGuidRegFormat(GuidValue):
return False
## Convert GUID string in C structure style to
## Convert GUID string in C structure style to
# xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue: The GUID value in C structure format
@@ -151,12 +151,12 @@ def RemoveDirectory(Directory, Recursively=False):
## Store content in file
#
# This method is used to save file only when its content is changed. This is
# quite useful for "make" system to decide what will be re-built and what
# quite useful for "make" system to decide what will be re-built and what
# won't.
#
# @param File: The path of file
# @param Content: The new content of the file
# @param IsBinaryFile: The flag indicating if the file is binary file
# @param IsBinaryFile: The flag indicating if the file is binary file
# or not
#
def SaveFileOnChange(File, Content, IsBinaryFile=True):
@@ -216,7 +216,7 @@ def GetFiles(Root, SkipList=None, FullPath=True):
# @param FullPath: True if the returned file should be full path
# @param PrefixPath: the path that need to be added to the files found
# @return: the list of files found
#
#
def GetNonMetaDataFiles(Root, SkipList, FullPath, PrefixPath):
FileList = GetFiles(Root, SkipList, FullPath)
NewFileList = []
@@ -602,25 +602,25 @@ def GetWorkspace():
## Get relative path
#
# use full path and workspace to get relative path
# the destination of this function is mainly to resolve the root path issue(like c: or c:\)
# the destination of this function is mainly to resolve the root path issue(like c: or c:\)
#
# @param Fullpath: a string of fullpath
# @param Workspace: a string of workspace
#
def GetRelativePath(Fullpath, Workspace):
RelativePath = ''
if Workspace.endswith(os.sep):
RelativePath = Fullpath[Fullpath.upper().find(Workspace.upper())+len(Workspace):]
else:
RelativePath = Fullpath[Fullpath.upper().find(Workspace.upper())+len(Workspace)+1:]
return RelativePath
## Check whether all module types are in list
#
# check whether all module types (SUP_MODULE_LIST) are in list
#
#
# @param ModuleList: a list of ModuleType
#
def IsAllModuleList(ModuleList):
@@ -632,9 +632,9 @@ def IsAllModuleList(ModuleList):
return True
## Dictionary that use comment(GenericComment, TailComment) as value,
# if a new comment which key already in the dic is inserted, then the
# if a new comment which key already in the dic is inserted, then the
# comment will be merged.
# Key is (Statement, SupArch), when TailComment is added, it will ident
# Key is (Statement, SupArch), when TailComment is added, it will ident
# according to Statement
#
class MergeCommentDict(dict):
@@ -671,7 +671,7 @@ def GenDummyHelpTextObj():
# <Major> ::= (a-fA-F0-9){4}
# <Minor> ::= (a-fA-F0-9){4}
# <DecVersion> ::= (0-65535) ["." (0-99)]
#
#
# @param StringIn: The string contains version defined in INF file.
# It can be Decimal or Hex
#
@@ -764,7 +764,7 @@ def ConvertPath(Path):
## ConvertSpec
#
# during install, convert the Spec string extract from UPD into INF allowable definition,
# during install, convert the Spec string extract from UPD into INF allowable definition,
# the difference is period is allowed in the former (not the first letter) but not in the latter.
# return converted Spec string
#
@@ -787,7 +787,7 @@ def ConvertSpec(SpecStr):
# The rule is elements in List A are in List B and elements in List B are in List A.
#
# @param ListA, ListB Lists need to be judged.
#
#
# @return True ListA and ListB are identical
# @return False ListA and ListB are different with each other
#
@@ -808,10 +808,10 @@ def IsEqualList(ListA, ListB):
## ConvertArchList
#
# Convert item in ArchList if the start character is lower case.
# In UDP spec, Arch is only allowed as: [A-Z]([a-zA-Z0-9])*
# In UDP spec, Arch is only allowed as: [A-Z]([a-zA-Z0-9])*
#
# @param ArchList The ArchList need to be converted.
#
#
# @return NewList The ArchList been converted.
#
def ConvertArchList(ArchList):
@@ -835,7 +835,7 @@ def ConvertArchList(ArchList):
# If one line ends with a line extender, then it will be combined together with next line.
#
# @param LineList The LineList need to be processed.
#
#
# @return NewList The ArchList been processed.
#
def ProcessLineExtender(LineList):
@@ -854,11 +854,11 @@ def ProcessLineExtender(LineList):
## ProcessEdkComment
#
# Process EDK style comment in LineList: c style /* */ comment or cpp style // comment
# Process EDK style comment in LineList: c style /* */ comment or cpp style // comment
#
#
# @param LineList The LineList need to be processed.
#
#
# @return LineList The LineList been processed.
# @return FirstPos Where Edk comment is first found, -1 if not found
#
@@ -868,7 +868,7 @@ def ProcessEdkComment(LineList):
StartPos = -1
EndPos = -1
FirstPos = -1
while(Count < len(LineList)):
Line = LineList[Count].strip()
if Line.startswith("/*"):
@@ -886,7 +886,7 @@ def ProcessEdkComment(LineList):
FindEdkBlockComment = True
break
Count = Count + 1
if FindEdkBlockComment:
if FirstPos == -1:
FirstPos = StartPos
@@ -900,9 +900,9 @@ def ProcessEdkComment(LineList):
LineList[Count] = Line.replace("//", '#')
if FirstPos == -1:
FirstPos = Count
Count = Count + 1
return LineList, FirstPos
## GetLibInstanceInfo
@@ -994,13 +994,13 @@ def GetLibInstanceInfo(String, WorkSpace, LineNo):
## GetLocalValue
#
# Generate the local value for INF and DEC file. If Lang attribute not present, then use this value.
# If present, and there is no element without the Lang attribute, and one of the elements has the rfc1766 code is
# "en-x-tianocore", or "en-US" if "en-x-tianocore" was not found, or "en" if "en-US" was not found, or startswith 'en'
# If present, and there is no element without the Lang attribute, and one of the elements has the rfc1766 code is
# "en-x-tianocore", or "en-US" if "en-x-tianocore" was not found, or "en" if "en-US" was not found, or startswith 'en'
# if 'en' was not found, then use this value.
# If multiple entries of a tag exist which have the same language code, use the last entry.
#
# @param ValueList A list need to be processed.
# @param UseFirstValue: True to use the first value, False to use the last value
# @param UseFirstValue: True to use the first value, False to use the last value
#
# @return LocalValue
def GetLocalValue(ValueList, UseFirstValue=False):
@@ -1040,7 +1040,7 @@ def GetLocalValue(ValueList, UseFirstValue=False):
Value5 = Value
else:
Value5 = Value
if Value1:
return Value1
if Value2:
@@ -1051,7 +1051,7 @@ def GetLocalValue(ValueList, UseFirstValue=False):
return Value4
if Value5:
return Value5
return ''
@@ -1088,29 +1088,29 @@ def GetCharIndexOutStr(CommentCharacter, Line):
#
# Check the UNI file path
#
# @param FilePath: The UNI file path
# @param FilePath: The UNI file path
#
def ValidateUNIFilePath(Path):
Suffix = Path[Path.rfind(TAB_SPLIT):]
#
# Check if the suffix is one of the '.uni', '.UNI', '.Uni'
# Check if the suffix is one of the '.uni', '.UNI', '.Uni'
#
if Suffix not in TAB_UNI_FILE_SUFFIXS:
Logger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
Message=ST.ERR_UNI_FILE_SUFFIX_WRONG,
ExtraData=Path)
Logger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
Message=ST.ERR_UNI_FILE_SUFFIX_WRONG,
ExtraData=Path)
#
# Check if '..' in the file name(without suffixe)
#
if (TAB_SPLIT + TAB_SPLIT) in Path:
Logger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
Message=ST.ERR_UNI_FILE_NAME_INVALID,
ExtraData=Path)
Logger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
Message=ST.ERR_UNI_FILE_NAME_INVALID,
ExtraData=Path)
#
# Check if the file name is valid according to the DEC and INF specification
#
@@ -1118,8 +1118,8 @@ def ValidateUNIFilePath(Path):
FileName = Path.replace(Suffix, '')
InvalidCh = re.sub(Pattern, '', FileName)
if InvalidCh:
Logger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
Message=ST.ERR_INF_PARSER_FILE_NOT_EXIST_OR_NAME_INVALID,
ExtraData=Path)
Logger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
Message=ST.ERR_INF_PARSER_FILE_NOT_EXIST_OR_NAME_INVALID,
ExtraData=Path)


@@ -3,9 +3,9 @@
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
@@ -42,7 +42,7 @@ def __HexDigit(TempChar):
return True
else:
return False
## IsValidHex() method
#
# Whether char input is a Hex data.
@@ -59,7 +59,7 @@ def IsValidHex(HexStr):
return False
## Judge the input string is valid bool type or not.
#
#
# <TRUE> ::= {"TRUE"} {"true"} {"True"} {"0x1"} {"0x01"}
# <FALSE> ::= {"FALSE"} {"false"} {"False"} {"0x0"} {"0x00"}
# <BoolType> ::= {<TRUE>} {<FALSE>}
@@ -90,18 +90,18 @@ def IsValidBoolType(BoolString):
#
else:
return False
## Is Valid Module Type List or not
#
# @param ModuleTypeList: A list contain ModuleType strings need to be
## Is Valid Module Type List or not
#
# @param ModuleTypeList: A list contain ModuleType strings need to be
# judged.
#
def IsValidInfMoudleTypeList(ModuleTypeList):
for ModuleType in ModuleTypeList:
return IsValidInfMoudleType(ModuleType)
## Is Valid Module Type or not
#
## Is Valid Module Type or not
#
# @param ModuleType: A string contain ModuleType need to be judged.
#
def IsValidInfMoudleType(ModuleType):
@@ -110,8 +110,8 @@ def IsValidInfMoudleType(ModuleType):
else:
return False
## Is Valid Component Type or not
#
## Is Valid Component Type or not
#
# @param ComponentType: A string contain ComponentType need to be judged.
#
def IsValidInfComponentType(ComponentType):
@@ -124,7 +124,7 @@ def IsValidInfComponentType(ComponentType):
## Is valid Tool Family or not
#
# @param ToolFamily: A string contain Tool Family need to be judged.
# Famlily := [A-Z]([a-zA-Z0-9])*
# Famlily := [A-Z]([a-zA-Z0-9])*
#
def IsValidToolFamily(ToolFamily):
ReIsValieFamily = re.compile(r"^[A-Z]+[A-Za-z0-9]{0,}$", re.DOTALL)
@@ -148,13 +148,13 @@ def IsValidToolTagName(TagName):
return True
## Is valid arch or not
#
#
# @param Arch The arch string need to be validated
# <OA> ::= (a-zA-Z)(A-Za-z0-9){0,}
# <arch> ::= {"IA32"} {"X64"} {"IPF"} {"EBC"} {<OA>}
# {"common"}
# @param Arch: Input arch
#
#
def IsValidArch(Arch):
if Arch == 'common':
return True
@@ -164,55 +164,55 @@ def IsValidArch(Arch):
return True
## Is valid family or not
#
#
# <Family> ::= {"MSFT"} {"GCC"} {"INTEL"} {<Usr>} {"*"}
# <Usr> ::= [A-Z][A-Za-z0-9]{0,}
#
# @param family: The family string need to be validated
#
#
def IsValidFamily(Family):
Family = Family.strip()
if Family == '*':
return True
if Family == '':
return True
ReIsValidFamily = re.compile(r"^[A-Z]+[A-Za-z0-9]{0,}$", re.DOTALL)
if ReIsValidFamily.match(Family) is None:
return False
return True
## Is valid build option name or not
#
#
# @param BuildOptionName: The BuildOptionName string need to be validated
#
def IsValidBuildOptionName(BuildOptionName):
if not BuildOptionName:
return False
ToolOptionList = GetSplitValueList(BuildOptionName, '_', 4)
if len(ToolOptionList) != 5:
return False
ReIsValidBuildOption1 = re.compile(r"^\s*(\*)|([A-Z][a-zA-Z0-9]*)$")
ReIsValidBuildOption2 = re.compile(r"^\s*(\*)|([a-zA-Z][a-zA-Z0-9]*)$")
if ReIsValidBuildOption1.match(ToolOptionList[0]) is None:
return False
if ReIsValidBuildOption1.match(ToolOptionList[1]) is None:
return False
if ReIsValidBuildOption2.match(ToolOptionList[2]) is None:
return False
if ToolOptionList[3] == "*" and ToolOptionList[4] not in ['FAMILY', 'DLL', 'DPATH']:
return False
return True
## IsValidToken
#
# Check if pattern string matches total token
@@ -234,14 +234,14 @@ def IsValidToken(ReString, Token):
def IsValidPath(Path, Root):
Path = Path.strip()
OrigPath = Path.replace('\\', '/')
Path = os.path.normpath(Path).replace('\\', '/')
Root = os.path.normpath(Root).replace('\\', '/')
FullPath = mws.join(Root, Path)
if not os.path.exists(FullPath):
return False
#
# If Path is absolute path.
# It should be in Root.
@@ -263,16 +263,16 @@ def IsValidPath(Path, Root):
for Rel in ['/.', '/..', '/']:
if OrigPath.endswith(Rel):
return False
Path = Path.rstrip('/')
#
# Check relative path
#
for Word in Path.split('/'):
if not IsValidWord(Word):
return False
return True
## IsValidInstallPath
@@ -294,12 +294,12 @@ def IsValidInstallPath(Path):
return False
if Path.startswith('.'):
return False
if Path.find('..') != -1:
return False
return True
## IsValidCFormatGuid
#
@@ -309,14 +309,14 @@ def IsValidInstallPath(Path):
#
def IsValidCFormatGuid(Guid):
#
# Valid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
# Valid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
# 0xaf, 0x48, 0xce }}
# Invalid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
# Invalid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
# 0xaf, 0x48, 0xce }} 0x123
# Invalid: { 0xf0b1 1735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
# Invalid: { 0xf0b1 1735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
# 0xaf, 0x48, 0xce }}
#
List = ['{', 10, ',', 6, ',', 6, ',{', 4, ',', 4, ',', 4,
List = ['{', 10, ',', 6, ',', 6, ',{', 4, ',', 4, ',', 4,
',', 4, ',', 4, ',', 4, ',', 4, ',', 4, '}}']
Index = 0
Value = ''
@@ -337,14 +337,14 @@ def IsValidCFormatGuid(Guid):
if not Value.startswith('0x') and not Value.startswith('0X'):
return False
#
# Index may out of bound
#
if not isinstance(List[Index], type(1)) or \
len(Value) > List[Index] or len(Value) < 3:
return False
#
# Check if string can be converted to integer
# Throw exception if not
@@ -367,23 +367,23 @@ def IsValidCFormatGuid(Guid):
# Check whether the PCD type is valid
#
# @param PcdTypeString: The PcdType string need to be checked.
#
#
def IsValidPcdType(PcdTypeString):
if PcdTypeString.upper() in PCD_USAGE_TYPE_LIST_OF_MODULE:
return True
else:
return False
## IsValidWord
#
# Check whether the word is valid.
# <Word> ::= (a-zA-Z0-9_)(a-zA-Z0-9_-){0,} Alphanumeric characters with
# optional
# dash "-" and/or underscore "_" characters. No whitespace
# <Word> ::= (a-zA-Z0-9_)(a-zA-Z0-9_-){0,} Alphanumeric characters with
# optional
# dash "-" and/or underscore "_" characters. No whitespace
# characters are permitted.
#
#
# @param Word: The word string need to be checked.
#
#
def IsValidWord(Word):
if not Word:
return False
@@ -394,9 +394,9 @@ def IsValidWord(Word):
not Word[0] == '_' and \
not Word[0].isdigit():
return False
LastChar = ''
for Char in Word[1:]:
for Char in Word[1:]:
if (not Char.isalpha()) and \
(not Char.isdigit()) and \
Char != '-' and \
@@ -406,82 +406,82 @@ def IsValidWord(Word):
if Char == '.' and LastChar == '.':
return False
LastChar = Char
return True
## IsValidSimpleWord
#
# Check whether the SimpleWord is valid.
# <SimpleWord> ::= (a-zA-Z0-9)(a-zA-Z0-9_-){0,}
# <SimpleWord> ::= (a-zA-Z0-9)(a-zA-Z0-9_-){0,}
# A word that cannot contain a period character.
#
#
# @param Word: The word string need to be checked.
#
#
def IsValidSimpleWord(Word):
ReIsValidSimpleWord = \
re.compile(r"^[0-9A-Za-z][0-9A-Za-z\-_]*$", re.DOTALL)
Word = Word.strip()
if not Word:
return False
if not ReIsValidSimpleWord.match(Word):
return False
return True
## IsValidDecVersion
#
# Check whether the decimal version is valid.
# <DecVersion> ::= (0-9){1,} ["." (0-9){1,}]
#
#
# @param Word: The word string need to be checked.
#
#
def IsValidDecVersion(Word):
if Word.find('.') > -1:
ReIsValidDecVersion = re.compile(r"[0-9]+\.?[0-9]+$")
else:
ReIsValidDecVersion = re.compile(r"[0-9]+$")
if ReIsValidDecVersion.match(Word) is None:
return False
return False
return True
## IsValidHexVersion
#
# Check whether the hex version is valid.
# <HexVersion> ::= "0x" <Major> <Minor>
# <Major> ::= <HexDigit>{4}
# <Minor> ::= <HexDigit>{4}
#
#
# @param Word: The word string need to be checked.
#
#
def IsValidHexVersion(Word):
ReIsValidHexVersion = re.compile(r"[0][xX][0-9A-Fa-f]{8}$", re.DOTALL)
if ReIsValidHexVersion.match(Word) is None:
return False
return True
## IsValidBuildNumber
#
# Check whether the BUILD_NUMBER is valid.
# ["BUILD_NUMBER" "=" <Integer>{1,4} <EOL>]
#
#
# @param Word: The BUILD_NUMBER string need to be checked.
#
#
def IsValidBuildNumber(Word):
ReIsValieBuildNumber = re.compile(r"[0-9]{1,4}$", re.DOTALL)
if ReIsValieBuildNumber.match(Word) is None:
return False
return True
## IsValidDepex
#
# Check whether the Depex is valid.
#
#
# @param Word: The Depex string need to be checked.
#
#
def IsValidDepex(Word):
Index = Word.upper().find("PUSH")
if Index > -1:
@@ -490,12 +490,12 @@ def IsValidDepex(Word):
ReIsValidCName = re.compile(r"^[A-Za-z_][0-9A-Za-z_\s\.]*$", re.DOTALL)
if ReIsValidCName.match(Word) is None:
return False
return True
## IsValidNormalizedString
#
# Check
# Check
# <NormalizedString> ::= <DblQuote> [{<Word>} {<Space>}]{1,} <DblQuote>
# <Space> ::= 0x20
#
@@ -504,31 +504,31 @@ def IsValidDepex(Word):
def IsValidNormalizedString(String):
if String == '':
return True
for Char in String:
if Char == '\t':
return False
StringList = GetSplitValueList(String, TAB_SPACE_SPLIT)
for Item in StringList:
if not Item:
continue
if not IsValidWord(Item):
return False
return True
## IsValidIdString
#
# Check whether the IdString is valid.
#
#
# @param IdString: The IdString need to be checked.
#
#
def IsValidIdString(String):
if IsValidSimpleWord(String.strip()):
return True
if String.strip().startswith('"') and \
String.strip().endswith('"'):
String = String[1:-1]
@@ -536,7 +536,7 @@ def IsValidIdString(String):
return True
if IsValidNormalizedString(String):
return True
return False
## IsValidVersionString
@@ -546,52 +546,52 @@ def IsValidIdString(String):
# <WhiteSpace> ::= {<Tab>} {<Space>}
# <Tab> ::= 0x09
# <Space> ::= 0x20
# <AsciiChars> ::= (0x21 - 0x7E)
#
# <AsciiChars> ::= (0x21 - 0x7E)
#
# @param VersionString: The VersionString need to be checked.
#
#
def IsValidVersionString(VersionString):
VersionString = VersionString.strip()
for Char in VersionString:
if not (Char >= 0x21 and Char <= 0x7E):
return False
return True
## IsValidPcdValue
#
# Check whether the PcdValue is valid.
#
#
# @param VersionString: The PcdValue need to be checked.
#
#
def IsValidPcdValue(PcdValue):
for Char in PcdValue:
if Char == '\n' or Char == '\t' or Char == '\f':
return False
#
# <Boolean>
#
if IsValidFeatureFlagExp(PcdValue, True)[0]:
return True
#
# <Number> ::= {<Integer>} {<HexNumber>}
# <Integer> ::= {(0-9)} {(1-9)(0-9){1,}}
# <HexNumber> ::= "0x" <HexDigit>{1,}
# <HexDigit> ::= (a-fA-F0-9)
#
#
if IsValidHex(PcdValue):
return True
ReIsValidIntegerSingle = re.compile(r"^\s*[0-9]\s*$", re.DOTALL)
if ReIsValidIntegerSingle.match(PcdValue) is not None:
return True
ReIsValidIntegerMulti = re.compile(r"^\s*[1-9][0-9]+\s*$", re.DOTALL)
ReIsValidIntegerMulti = re.compile(r"^\s*[1-9][0-9]+\s*$", re.DOTALL)
if ReIsValidIntegerMulti.match(PcdValue) is not None:
return True
#
# <StringVal> ::= {<StringType>} {<Array>} {"$(" <MACRO> ")"}
# <StringType> ::= {<UnicodeString>} {<CString>}
@@ -609,7 +609,7 @@ def IsValidPcdValue(PcdValue):
IsTrue = True
if IsTrue:
return IsTrue
#
# <Array> ::= {<CArray>} {<NList>} {<CFormatGUID>}
# <CArray> ::= "{" [<NList>] <CArray>{0,} "}"
@@ -619,44 +619,44 @@ def IsValidPcdValue(PcdValue):
#
if IsValidCFormatGuid(PcdValue):
return True
ReIsValidByteHex = re.compile(r"^\s*0x[0-9a-fA-F]{1,2}\s*$", re.DOTALL)
if PcdValue.strip().startswith('{') and PcdValue.strip().endswith('}') :
StringValue = PcdValue.strip().lstrip('{').rstrip('}')
ValueList = StringValue.split(',')
AllValidFlag = True
for ValueItem in ValueList:
for ValueItem in ValueList:
if not ReIsValidByteHex.match(ValueItem.strip()):
AllValidFlag = False
if AllValidFlag:
return True
#
#
# NList
#
AllValidFlag = True
ValueList = PcdValue.split(',')
for ValueItem in ValueList:
for ValueItem in ValueList:
if not ReIsValidByteHex.match(ValueItem.strip()):
AllValidFlag = False
if AllValidFlag:
return True
return False
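An illustrative sketch of a few PcdValue forms, not part of the original
file; the import path is assumed as before, and the expected results assume
the helper routines used above (IsValidFeatureFlagExp, IsValidHex,
IsValidCFormatGuid) behave as in this module:

    from Library.ParserValidate import IsValidPcdValue  # assumed import path

    print(IsValidPcdValue('0x0000000F'))    # <HexNumber> form, expected True
    print(IsValidPcdValue('{0x01, 0x02}'))  # byte-sized <CArray>, expected True
    print(IsValidPcdValue('bad\tvalue'))    # embedded tab, expected False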
## IsValidCVariableName
#
# Check whether the PcdValue is valid.
#
#
# @param VersionString: The PcdValue need to be checked.
#
#
def IsValidCVariableName(CName):
ReIsValidCName = re.compile(r"^[A-Za-z_][0-9A-Za-z_]*$", re.DOTALL)
if ReIsValidCName.match(CName) is None:
return False
return True
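A minimal sketch, not part of the original file, same import assumption:

    from Library.ParserValidate import IsValidCVariableName  # assumed import path

    assert IsValidCVariableName('PcdDebugPropertyMask')  # C identifier characters only
    assert not IsValidCVariableName('2ndValue')           # must not start with a digit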
## IsValidIdentifier
@@ -671,7 +671,7 @@ def IsValidIdentifier(Ident):
ReIdent = re.compile(r"^[A-Za-z_][0-9A-Za-z_]*$", re.DOTALL)
if ReIdent.match(Ident) is None:
return False
return True
## IsValidDecVersionVal
@@ -682,10 +682,10 @@ def IsValidIdentifier(Ident):
#
def IsValidDecVersionVal(Ver):
ReVersion = re.compile(r"[0-9]+(\.[0-9]{1,2})$")
if ReVersion.match(Ver) is None:
return False
return True
@@ -699,7 +699,7 @@ def IsValidLibName(LibName):
ReLibName = re.compile("^[A-Z]+[a-zA-Z0-9]*$")
if not ReLibName.match(LibName):
return False
return True
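A minimal sketch, not part of the original file, same import assumption:

    from Library.ParserValidate import IsValidLibName  # assumed import path

    assert IsValidLibName('UefiLib')      # starts with an upper-case letter
    assert not IsValidLibName('uefiLib')  # a lower-case first letter is rejected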
# IsValidUserId

View File

@@ -1,12 +1,12 @@
## @file
# This file is used to define common parsing related functions used in parsing
# This file is used to define common parsing related functions used in parsing
# INF/DEC/DSC process
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
@@ -74,7 +74,7 @@ def GetBuildOption(String, File, LineNo= -1):
# Get Library of Dsc as <LibraryClassKeyWord>|<LibraryInstance>
#
# @param Item: String as <LibraryClassKeyWord>|<LibraryInstance>
# @param ContainerFile: The file which describes the library class, used for
# @param ContainerFile: The file which describes the library class, used for
# error report
#
def GetLibraryClass(Item, ContainerFile, WorkspaceDir, LineNo= -1):
@@ -99,7 +99,7 @@ def GetLibraryClass(Item, ContainerFile, WorkspaceDir, LineNo= -1):
# [|<TokenSpaceGuidCName>.<PcdCName>]
#
# @param Item: String as <LibraryClassKeyWord>|<LibraryInstance>
# @param ContainerFile: The file which describes the library class, used for
# @param ContainerFile: The file which describes the library class, used for
# error report
#
def GetLibraryClassOfInf(Item, ContainerFile, WorkspaceDir, LineNo= -1):
@@ -148,7 +148,7 @@ def CheckPcdTokenInfo(TokenInfoString, Section, File, LineNo= -1):
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|
# <Value>[|<Type>|<MaximumDatumSize>]
# @param ContainerFile: The file which describes the pcd, used for error
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
@@ -176,7 +176,7 @@ def GetPcd(Item, Type, ContainerFile, LineNo= -1):
#
# @param Item: String as <PcdTokenSpaceGuidCName>
# .<TokenCName>|TRUE/FALSE
# @param ContainerFile: The file which describes the pcd, used for error
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetFeatureFlagPcd(Item, Type, ContainerFile, LineNo= -1):
@@ -200,7 +200,7 @@ def GetFeatureFlagPcd(Item, Type, ContainerFile, LineNo= -1):
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|
# TRUE/FALSE
# @param ContainerFile: The file which describes the pcd, used for error
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetDynamicDefaultPcd(Item, Type, ContainerFile, LineNo= -1):
@@ -226,7 +226,7 @@ def GetDynamicDefaultPcd(Item, Type, ContainerFile, LineNo= -1):
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>|
# TRUE/FALSE
# @param ContainerFile: The file which describes the pcd, used for error
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetDynamicHiiPcd(Item, Type, ContainerFile, LineNo= -1):
@@ -253,7 +253,7 @@ def GetDynamicHiiPcd(Item, Type, ContainerFile, LineNo= -1):
#
# @param Item: String as <PcdTokenSpaceGuidCName>.<TokenCName>
# |TRUE/FALSE
# @param ContainerFile: The file which describes the pcd, used for error
# @param ContainerFile: The file which describes the pcd, used for error
# report
#
def GetDynamicVpdPcd(Item, Type, ContainerFile, LineNo= -1):
@@ -273,7 +273,7 @@ def GetDynamicVpdPcd(Item, Type, ContainerFile, LineNo= -1):
## GetComponent
#
# Parse block of the components defined in dsc file
# Set KeyValues as [ ['component name', [lib1, lib2, lib3],
# Set KeyValues as [ ['component name', [lib1, lib2, lib3],
# [bo1, bo2, bo3], [pcd1, pcd2, pcd3]], ...]
#
# @param Lines: The content to be parsed
@@ -408,7 +408,7 @@ def GetExec(String):
## GetComponents
#
# Parse block of the components defined in dsc file
# Set KeyValues as [ ['component name', [lib1, lib2, lib3], [bo1, bo2, bo3],
# Set KeyValues as [ ['component name', [lib1, lib2, lib3], [bo1, bo2, bo3],
# [pcd1, pcd2, pcd3]], ...]
#
# @param Lines: The content to be parsed
@@ -531,7 +531,7 @@ def GetComponents(Lines, KeyValues, CommentCharacter):
#
# @param Item: String as <Filename>[|<Family>[|<TagName>[|<ToolCode>
# [|<PcdFeatureFlag>]]]]
# @param ContainerFile: The file which describes the library class, used
# @param ContainerFile: The file which describes the library class, used
# for error report
#
def GetSource(Item, ContainerFile, FileRelativePath, LineNo= -1):
@@ -556,7 +556,7 @@ def GetSource(Item, ContainerFile, FileRelativePath, LineNo= -1):
#
# @param Item: String as <Filename>[|<Family>[|<TagName>
# [|<ToolCode>[|<PcdFeatureFlag>]]]]
# @param ContainerFile: The file which describes the library class,
# @param ContainerFile: The file which describes the library class,
# used for error report
#
def GetBinary(Item, ContainerFile, LineNo= -1):
@@ -580,7 +580,7 @@ def GetBinary(Item, ContainerFile, LineNo= -1):
#
# @param Item: String as <GuidCName>[|<PcdFeatureFlag>]
# @param Type: Type of parsing string
# @param ContainerFile: The file which describes the library class,
# @param ContainerFile: The file which describes the library class,
# used for error report
#
def GetGuidsProtocolsPpisOfInf(Item):
@@ -594,7 +594,7 @@ def GetGuidsProtocolsPpisOfInf(Item):
#
# @param Item: String as <GuidCName>=<GuidValue>
# @param Type: Type of parsing string
# @param ContainerFile: The file which describes the library class,
# @param ContainerFile: The file which describes the library class,
# used for error report
#
def GetGuidsProtocolsPpisOfDec(Item, Type, ContainerFile, LineNo= -1):
@@ -625,7 +625,7 @@ def GetGuidsProtocolsPpisOfDec(Item, Type, ContainerFile, LineNo= -1):
#
# @param Item: String as <PackagePath>[|<PcdFeatureFlag>]
# @param Type: Type of parsing string
# @param ContainerFile: The file which describes the library class,
# @param ContainerFile: The file which describes the library class,
# used for error report
#
def GetPackage(Item, ContainerFile, FileRelativePath, LineNo= -1):
@@ -936,7 +936,7 @@ def MacroParser(Line, FileName, SectionType, FileLocalMacros):
# <Value> ::= {<NumVal>} {<Boolean>} {<AsciiString>} {<GUID>}
# {<CString>} {<UnicodeString>} {<CArray>}
#
# The definition of <NumVal>, <PATH>, <Boolean>, <GUID>, <CString>,
# The definition of <NumVal>, <PATH>, <Boolean>, <GUID>, <CString>,
# <UnicodeString>, <CArray> are subset of <AsciiString>.
#
ReIsValidMacroValue = re.compile(r"^[\x20-\x7e]*$", re.DOTALL)
@@ -950,15 +950,15 @@ def MacroParser(Line, FileName, SectionType, FileLocalMacros):
return Name, Value
## GenSection
## GenSection
#
# generate section contents
#
# @param SectionName: indicate the name of the section, details refer to
# @param SectionName: indicate the name of the section, details refer to
# INF, DEC specs
# @param SectionDict: section statement dict, key is SectionAttrs(arch,
# moduletype or platform may exist as needed) list
# separated by space,
# @param SectionDict: section statement dict, key is SectionAttrs(arch,
# moduletype or platform may exist as needed) list
# separated by space,
# value is statement
#
def GenSection(SectionName, SectionDict, SplitArch=True, NeedBlankLine=False):
@@ -1004,10 +1004,10 @@ def GenSection(SectionName, SectionDict, SplitArch=True, NeedBlankLine=False):
return Content
## ConvertArchForInstall
# if Arch.upper() is in "IA32", "X64", "IPF", and "EBC", it must be upper case. "common" must be lower case.
# if Arch.upper() is in "IA32", "X64", "IPF", and "EBC", it must be upper case. "common" must be lower case.
# Anything else, the case must be preserved
#
# @param Arch: the arch string that need to be converted, it should be stripped before pass in
# @param Arch: the arch string that need to be converted, it should be stripped before pass in
# @return: the arch string that get converted
#
def ConvertArchForInstall(Arch):
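A minimal sketch of the conversion rule described in the comment above; this
is an assumed restatement for illustration, not the body of ConvertArchForInstall:

    def ConvertArchSketch(Arch):
        # hypothetical helper restating the documented rule
        if Arch.upper() in ('IA32', 'X64', 'IPF', 'EBC'):
            return Arch.upper()   # the four known arches are forced to upper case
        if Arch.upper() == 'COMMON':
            return Arch.lower()   # "common" is forced to lower case
        return Arch               # any other arch keeps its original case

    print(ConvertArchSketch('ia32'))    # -> 'IA32'
    print(ConvertArchSketch('Common'))  # -> 'common'
    print(ConvertArchSketch('Arm'))     # -> 'Arm'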

View File

@@ -3,9 +3,9 @@
#
# Copyright (c) 2014 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
@@ -148,9 +148,9 @@ def GetLanguageCode1766(LangName, File=None):
if LangName.isalpha() and gLANG_CONV_TABLE.get(LangName.lower()):
return LangName
else:
EdkLogger.Error("Unicode File Parser",
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 1766 language code : %s" % LangName,
"Invalid RFC 1766 language code : %s" % LangName,
File)
elif length == 5:
if LangName[0:2].isalpha() and LangName[2] == '-':
@@ -167,11 +167,11 @@ def GetLanguageCode1766(LangName, File=None):
if Key == LangName[0:3].lower():
return Key
EdkLogger.Error("Unicode File Parser",
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 4646 language code : %s" % LangName,
"Invalid RFC 4646 language code : %s" % LangName,
File)
## GetLanguageCode
#
# Check the language code read from .UNI file and convert RFC 1766 codes to RFC 4646 codes if appropriate
@@ -191,9 +191,9 @@ def GetLanguageCode(LangName, IsCompatibleMode, File):
return TempLangName
return LangName
else:
EdkLogger.Error("Unicode File Parser",
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 1766 language code : %s" % LangName,
"Invalid RFC 1766 language code : %s" % LangName,
File)
if (LangName[0] == 'X' or LangName[0] == 'x') and LangName[1] == '-':
return LangName
@@ -212,9 +212,9 @@ def GetLanguageCode(LangName, IsCompatibleMode, File):
if LangName[0:3].isalpha() and gLANG_CONV_TABLE.get(LangName.lower()) is None and LangName[3] == '-':
return LangName
EdkLogger.Error("Unicode File Parser",
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 4646 language code : %s" % LangName,
"Invalid RFC 4646 language code : %s" % LangName,
File)
## FormatUniEntry
@@ -231,7 +231,7 @@ def FormatUniEntry(StrTokenName, TokenValueList, ContainerFile):
PreFormatLength = 40
if len(StrTokenName) > PreFormatLength:
PreFormatLength = len(StrTokenName) + 1
for (Lang, Value) in TokenValueList:
for (Lang, Value) in TokenValueList:
if not Value or Lang == DT.TAB_LANGUAGE_EN_X:
continue
if Lang == '':
@@ -333,15 +333,15 @@ class UniFileClassObject(object):
except UnicodeError as Xstr:
FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16_le').readlines()
except:
EdkLogger.Error("Unicode File Parser",
ToolError.FILE_OPEN_FAILURE,
EdkLogger.Error("Unicode File Parser",
ToolError.FILE_OPEN_FAILURE,
"File read failure: %s" % str(Xstr),
ExtraData=File)
LineNo = GetLineNo(FileIn, Line, False)
EdkLogger.Error("Unicode File Parser",
EdkLogger.Error("Unicode File Parser",
ToolError.PARSER_ERROR,
"Wrong language definition",
ExtraData="""%s\n\t*Correct format is like '#langdef en-US "English"'""" % Line,
"Wrong language definition",
ExtraData="""%s\n\t*Correct format is like '#langdef en-US "English"'""" % Line,
File = File, Line = LineNo)
else:
LangName = GetLanguageCode(Lang[1], self.IsCompatibleMode, self.File)
@@ -365,7 +365,7 @@ class UniFileClassObject(object):
if not IsLangInDef:
#
# The found STRING tokens will be added into new language string list
# so that the unique STRING identifier is reserved for all languages in the package list.
# so that the unique STRING identifier is reserved for all languages in the package list.
#
FirstLangName = self.LanguageDef[0][0]
if LangName != FirstLangName:
@@ -375,10 +375,10 @@ class UniFileClassObject(object):
OtherLang = Item.UseOtherLangDef
else:
OtherLang = FirstLangName
self.OrderedStringList[LangName].append (StringDefClassObject(Item.StringName,
'',
Item.Referenced,
Item.Token,
self.OrderedStringList[LangName].append (StringDefClassObject(Item.StringName,
'',
Item.Referenced,
Item.Token,
OtherLang))
self.OrderedStringDict[LangName][Item.StringName] = len(self.OrderedStringList[LangName]) - 1
return True
@@ -395,7 +395,7 @@ class UniFileClassObject(object):
if Name != '':
MatchString = re.match('[A-Z0-9_]+', Name, re.UNICODE)
if MatchString is None or MatchString.end(0) != len(Name):
EdkLogger.Error("Unicode File Parser",
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
'The string token name %s in UNI file %s must be upper case character.' %(Name, self.File))
LanguageList = Item.split(u'#language ')
@@ -406,7 +406,7 @@ class UniFileClassObject(object):
Language = LanguageList[IndexI].split()[0]
#.replace(u'\r\n', u'')
Value = \
LanguageList[IndexI][LanguageList[IndexI].find(u'\"') + len(u'\"') : LanguageList[IndexI].rfind(u'\"')]
LanguageList[IndexI][LanguageList[IndexI].find(u'\"') + len(u'\"') : LanguageList[IndexI].rfind(u'\"')]
Language = GetLanguageCode(Language, self.IsCompatibleMode, self.File)
self.AddStringToList(Name, Language, Value)
@@ -424,7 +424,7 @@ class UniFileClassObject(object):
#
def PreProcess(self, File, IsIncludeFile=False):
if not os.path.exists(File.Path) or not os.path.isfile(File.Path):
EdkLogger.Error("Unicode File Parser",
EdkLogger.Error("Unicode File Parser",
ToolError.FILE_NOT_FOUND,
ExtraData=File.Path)
@@ -443,8 +443,8 @@ class UniFileClassObject(object):
FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16_le').readlines()
except:
EdkLogger.Error("Unicode File Parser", ToolError.FILE_OPEN_FAILURE, ExtraData=File.Path)
#
# get the file header
#
@@ -467,7 +467,7 @@ class UniFileClassObject(object):
if Line.startswith(DT.TAB_COMMENT_EDK1_SPLIT) and HeaderStart and not HeaderEnd and FirstGenHeader:
self.UniFileHeader += Line + '\r\n'
continue
#
# Use unique identifier
#
@@ -486,7 +486,7 @@ class UniFileClassObject(object):
Line = Line.strip()
#
# Ignore comment line and empty line
#
#
if Line == u'' or Line.startswith(u'//'):
#
# Change the single line String entry flag status
@@ -528,7 +528,7 @@ class UniFileClassObject(object):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
Line = Line.replace(UNICODE_WIDE_CHAR, WIDE_CHAR)
Line = Line.replace(UNICODE_NARROW_CHAR, NARROW_CHAR)
Line = Line.replace(UNICODE_NON_BREAKING_CHAR, NON_BREAKING_CHAR)
@@ -556,16 +556,16 @@ class UniFileClassObject(object):
#
if Line.startswith(u'#string') and Line.find(u'#language') == -1:
MultiLineFeedExits = True
if Line.startswith(u'#string') and Line.find(u'#language') > 0 and Line.find(u'"') < 0:
MultiLineFeedExits = True
#
# Between Language entry and String entry can not contain line feed
#
if Line.startswith(u'#language') and len(Line.split()) == 2:
MultiLineFeedExits = True
#
# Between two String entry, can not contain line feed
#
@@ -588,7 +588,7 @@ class UniFileClassObject(object):
StringEntryExistsFlag = 0
Lines.append(Line)
#
# Convert string def format as below
#
@@ -601,11 +601,11 @@ class UniFileClassObject(object):
# "Mi segunda secuencia 1"
# "Mi segunda secuencia 2"
#
if not IsIncludeFile and not Lines:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_NO_SECTION_EXIST, \
ExtraData=File.Path)
ExtraData=File.Path)
NewLines = []
StrName = u''
@@ -615,7 +615,7 @@ class UniFileClassObject(object):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_STRNAME_FORMAT_ERROR % StrName.split()[1], \
ExtraData=File.Path)
if StrName and len(StrName.split()[1].split(DT.TAB_UNDERLINE_SPLIT)) == 4:
StringTokenList = StrName.split()[1].split(DT.TAB_UNDERLINE_SPLIT)
if (StringTokenList[3].upper() in [DT.TAB_STR_TOKENPROMPT, DT.TAB_STR_TOKENHELP] and \
@@ -624,19 +624,19 @@ class UniFileClassObject(object):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_STRTOKEN_FORMAT_ERROR % StrName.split()[1], \
ExtraData=File.Path)
if Line.count(u'#language') > 1:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_SEP_LANGENTRY_LINE % Line, \
ExtraData=File.Path)
ExtraData=File.Path)
if Line.startswith(u'//'):
continue
elif Line.startswith(u'#langdef'):
if len(Line.split()) == 2:
NewLines.append(Line)
continue
elif len(Line.split()) > 2 and Line.find(u'"') > 0:
elif len(Line.split()) > 2 and Line.find(u'"') > 0:
NewLines.append(Line[:Line.find(u'"')].strip())
NewLines.append(Line[Line.find(u'"'):])
else:
@@ -659,10 +659,10 @@ class UniFileClassObject(object):
if Line[Line.find(u'#language')-1] != ' ' or \
Line[Line.find(u'#language')+len(u'#language')] != u' ':
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
if Line.find(u'"') > 0:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
StrName = Line.split()[0] + u' ' + Line.split()[1]
if StrName:
if StrName.split()[1] not in ExistStrNameList:
@@ -684,11 +684,11 @@ class UniFileClassObject(object):
if Line[Line.find(u'#language')-1] != u' ' or \
Line[Line.find(u'#language')+len(u'#language')] != u' ':
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
if Line[Line.find(u'"')-1] != u' ':
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
StrName = Line.split()[0] + u' ' + Line.split()[1]
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
StrName = Line.split()[0] + u' ' + Line.split()[1]
if StrName:
if StrName.split()[1] not in ExistStrNameList:
ExistStrNameList.append(StrName.split()[1].strip())
@@ -698,11 +698,11 @@ class UniFileClassObject(object):
DT.TAB_DEC_BINARY_ABSTRACT, DT.TAB_DEC_BINARY_DESCRIPTION]:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_MULTI_ENTRY_EXIST % StrName.split()[1], \
ExtraData=File.Path)
ExtraData=File.Path)
if IsIncludeFile:
if StrName not in NewLines:
NewLines.append((Line[:Line.find(u'#language')]).strip())
else:
else:
NewLines.append((Line[:Line.find(u'#language')]).strip())
NewLines.append((Line[Line.find(u'#language'):Line.find(u'"')]).strip())
NewLines.append((Line[Line.find(u'"'):]).strip())
@@ -733,17 +733,17 @@ class UniFileClassObject(object):
else:
print(Line)
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
if StrName and not StrName.split()[1].startswith(u'STR_'):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_STRNAME_FORMAT_ERROR % StrName.split()[1], \
ExtraData=File.Path)
ExtraData=File.Path)
if StrName and not NewLines:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNI_MISS_LANGENTRY % StrName, \
ExtraData=File.Path)
#
# Check Abstract, Description, BinaryAbstract and BinaryDescription order,
# should be Abstract, Description, BinaryAbstract, BinaryDescription
@@ -762,7 +762,7 @@ class UniFileClassObject(object):
BinaryDescriptionPosition = ExistStrNameList.index(StrName)
else:
DescriptionPosition = ExistStrNameList.index(StrName)
OrderList = sorted([AbstractPosition, DescriptionPosition])
BinaryOrderList = sorted([BinaryAbstractPosition, BinaryDescriptionPosition])
Min = OrderList[0]
@@ -774,25 +774,25 @@ class UniFileClassObject(object):
BinaryMax > Max):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_ENTRY_ORDER_WRONG, \
ExtraData=File.Path)
ExtraData=File.Path)
elif BinaryAbstractPosition > -1:
if not(BinaryAbstractPosition > Max):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_ENTRY_ORDER_WRONG, \
ExtraData=File.Path)
ExtraData=File.Path)
if DescriptionPosition > -1:
if not(DescriptionPosition == Max and AbstractPosition == Min and \
DescriptionPosition > AbstractPosition):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_ENTRY_ORDER_WRONG, \
ExtraData=File.Path)
ExtraData=File.Path)
if not self.UniFileHeader:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
Message = ST.ERR_NO_SOURCE_HEADER,
ExtraData=File.Path)
return NewLines
#
@@ -800,13 +800,13 @@ class UniFileClassObject(object):
#
def LoadUniFile(self, File = None):
if File is None:
EdkLogger.Error("Unicode File Parser",
ToolError.PARSER_ERROR,
Message='No unicode file is given',
EdkLogger.Error("Unicode File Parser",
ToolError.PARSER_ERROR,
Message='No unicode file is given',
ExtraData=File.Path)
self.File = File
#
# Process special char in file
#
@@ -849,10 +849,10 @@ class UniFileClassObject(object):
SecondLine.find(u'#string ') < 0 and SecondLine.find(u'#language ') >= 0 and \
ThirdLine.find(u'#string ') < 0 and ThirdLine.find(u'#language ') < 0:
if Line.find('"') > 0 or SecondLine.find('"') > 0:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
Message=ST.ERR_UNIPARSE_DBLQUOTE_UNMATCHED,
ExtraData=File.Path)
Name = Line[Line.find(u'#string ') + len(u'#string ') : ].strip(' ')
Language = SecondLine[SecondLine.find(u'#language ') + len(u'#language ') : ].strip(' ')
for IndexJ in range(IndexI + 2, len(Lines)):
@@ -894,11 +894,11 @@ class UniFileClassObject(object):
for LangNameItem in self.LanguageDef:
if Language == LangNameItem[0]:
break
if Language not in self.OrderedStringList:
self.OrderedStringList[Language] = []
self.OrderedStringDict[Language] = {}
IsAdded = True
if Name in self.OrderedStringDict[Language]:
IsAdded = False
@@ -906,38 +906,38 @@ class UniFileClassObject(object):
ItemIndexInList = self.OrderedStringDict[Language][Name]
Item = self.OrderedStringList[Language][ItemIndexInList]
Item.UpdateValue(Value)
Item.UseOtherLangDef = ''
Item.UseOtherLangDef = ''
if IsAdded:
Token = len(self.OrderedStringList[Language])
if Index == -1:
self.OrderedStringList[Language].append(StringDefClassObject(Name,
Value,
Referenced,
Token,
self.OrderedStringList[Language].append(StringDefClassObject(Name,
Value,
Referenced,
Token,
UseOtherLangDef))
self.OrderedStringDict[Language][Name] = Token
for LangName in self.LanguageDef:
#
# New STRING token will be added into all language string lists.
# so that the unique STRING identifier is reserved for all languages in the package list.
# so that the unique STRING identifier is reserved for all languages in the package list.
#
if LangName[0] != Language:
if UseOtherLangDef != '':
OtherLangDef = UseOtherLangDef
else:
OtherLangDef = Language
self.OrderedStringList[LangName[0]].append(StringDefClassObject(Name,
'',
Referenced,
Token,
self.OrderedStringList[LangName[0]].append(StringDefClassObject(Name,
'',
Referenced,
Token,
OtherLangDef))
self.OrderedStringDict[LangName[0]][Name] = len(self.OrderedStringList[LangName[0]]) - 1
else:
self.OrderedStringList[Language].insert(Index, StringDefClassObject(Name,
Value,
Referenced,
Token,
self.OrderedStringList[Language].insert(Index, StringDefClassObject(Name,
Value,
Referenced,
Token,
UseOtherLangDef))
self.OrderedStringDict[Language][Name] = Index
@@ -1029,16 +1029,16 @@ class UniFileClassObject(object):
print(Item)
for Member in self.OrderedStringList[Item]:
print(str(Member))
#
# Read content from '!include' UNI file
# Read content from '!include' UNI file
#
def ReadIncludeUNIfile(self, FilaPath):
if self.File:
pass
if not os.path.exists(FilaPath) or not os.path.isfile(FilaPath):
EdkLogger.Error("Unicode File Parser",
EdkLogger.Error("Unicode File Parser",
ToolError.FILE_NOT_FOUND,
ExtraData=FilaPath)
try:

View File

@@ -2,11 +2,11 @@
# This is an XML API that uses a syntax similar to XPath, but it is written in
# standard python so that no extra python packages are required to use it.
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
@@ -141,8 +141,8 @@ def XmlElement(Dom, String):
## Get a single XML element using XPath style syntax.
#
# Similar to XmlElement, but do not strip all the leading and trailing space
# and newline, instead just remove the newline and spaces introduced by
# toprettyxml()
# and newline, instead just remove the newline and spaces introduced by
# toprettyxml()
#
# @param Dom The root XML DOM object.
# @param String A XPath style path.

View File

@@ -4,11 +4,11 @@
# This file is required to make Python interpreter treat the directory
# as containing package.
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
@@ -17,4 +17,4 @@
'''
Xml
'''
'''

View File

@@ -4,11 +4,11 @@
# This file is required to make Python interpreter treat the directory
# as containing package.
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
@@ -17,4 +17,4 @@
'''
Library
'''
'''