BaseTools: Clean up source files

1. Do not use tab characters
2. No trailing white space at the end of any line
3. All files must end with CRLF

Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Liming Gao <liming.gao@intel.com>
Cc: Yonghong Zhu <yonghong.zhu@intel.com>
Reviewed-by: Yonghong Zhu <yonghong.zhu@intel.com>
Author:  Liming Gao
Date:    2018-07-05 17:40:04 +08:00
Parent:  39456d00f3
Commit:  f7496d7173

289 changed files with 10647 additions and 10647 deletions
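
The three rules above are straightforward to machine-check. The following is a minimal illustrative sketch (not part of this commit) of a Python scanner that walks a tree and reports tab characters, trailing white space, and files that do not end with CRLF; the ".py" extension filter and the command-line handling are assumptions made for the example. In day-to-day use, "git diff --check" also flags trailing white space before a change like this is committed.

import os
import sys

def CheckFile(Path):
    # Return a list of human-readable findings for one file.
    Issues = []
    with open(Path, 'rb') as File:
        Data = File.read()
    for LineNo, Line in enumerate(Data.splitlines(), 1):
        if b'\t' in Line:
            Issues.append('%s:%d: tab character' % (Path, LineNo))
        if Line != Line.rstrip():
            Issues.append('%s:%d: trailing white space' % (Path, LineNo))
    # Rule 3: the file must end with a CRLF-terminated line.
    if Data and not Data.endswith(b'\r\n'):
        Issues.append('%s: does not end with CRLF' % Path)
    return Issues

if __name__ == '__main__':
    # Scan the given directory (default: current directory) for Python sources.
    Root = sys.argv[1] if len(sys.argv) > 1 else '.'
    for Dir, _, Files in os.walk(Root):
        for Name in Files:
            if Name.endswith('.py'):
                for Issue in CheckFile(os.path.join(Dir, Name)):
                    print(Issue)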


@@ -2,7 +2,7 @@
from antlr3 import *
from antlr3.compat import set, frozenset
## @file
# The file defines the Lexer for C source files.
#
@@ -10,7 +10,7 @@ from antlr3.compat import set, frozenset
# This file is generated by running:
# java org.antlr.Tool C.g
#
# Copyright (c) 2009 - 2010, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
@@ -4341,7 +4341,7 @@ class CLexer(Lexer):
u"\12\uffff"
)
DFA25_transition = [
DFA.unpack(u"\1\2\1\uffff\12\1"),
DFA.unpack(u"\1\3\1\uffff\12\1\12\uffff\1\5\1\4\1\5\35\uffff\1\5"
@@ -4479,7 +4479,7 @@ class CLexer(Lexer):
u"\u0192\uffff"
)
DFA35_transition = [
DFA.unpack(u"\6\73\2\70\1\73\2\70\22\73\1\70\1\50\1\65\1\72\1\63"
u"\1\45\1\46\1\64\1\34\1\35\1\40\1\42\1\3\1\43\1\41\1\44\1\66\11"
@@ -4943,5 +4943,5 @@ class CLexer(Lexer):
# class definition for DFA #35
DFA35 = DFA

File diff suppressed because it is too large.


@@ -563,17 +563,17 @@ class Check(object):
op = open(FullName).readlines()
FileLinesList = op
LineNo = 0
CurrentSection = MODEL_UNKNOWN
CurrentSection = MODEL_UNKNOWN
HeaderSectionLines = []
HeaderCommentStart = False
HeaderCommentStart = False
HeaderCommentEnd = False
for Line in FileLinesList:
LineNo = LineNo + 1
Line = Line.strip()
if (LineNo < len(FileLinesList) - 1):
NextLine = FileLinesList[LineNo].strip()
#
# blank line
#
@@ -600,8 +600,8 @@ class Check(object):
#
HeaderSectionLines.append((Line, LineNo))
HeaderCommentStart = True
continue
continue
#
# Collect Header content.
#
@@ -635,7 +635,7 @@ class Check(object):
if EccGlobalData.gConfig.HeaderCheckFileCommentEnd == '1' or EccGlobalData.gConfig.HeaderCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, "File", Result[0])
# Check whether the function headers are followed Doxygen special documentation blocks in section 2.3.5
def DoxygenCheckFunctionHeader(self):
@@ -827,7 +827,7 @@ class Check(object):
for FilePath in FilePathList:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_NAME_DUPLICATE, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_NAME_DUPLICATE, OtherMsg="The Library Class [%s] is duplicated in '%s' line %s and line %s." % (Record[1], FilePath, Record[3], Record[4]), BelongsToTable='Dsc', BelongsToItem=Record[0])
# Check the header file in Include\Library directory whether be defined in the package DEC file.
def MetaDataFileCheckLibraryDefinedInDec(self):
if EccGlobalData.gConfig.MetaDataFileCheckLibraryDefinedInDec == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
@@ -842,9 +842,9 @@ class Check(object):
if not LibraryDec:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_NOT_DEFINED, LibraryInInf):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_NOT_DEFINED, \
OtherMsg="The Library Class [%s] in %s line is not defined in the associated package file." % (LibraryInInf, Line),
OtherMsg="The Library Class [%s] in %s line is not defined in the associated package file." % (LibraryInInf, Line),
BelongsToTable='Inf', BelongsToItem=ID)
# Check whether an Inf file is specified in the FDF file, but not in the Dsc file, then the Inf file must be for a Binary module only
def MetaDataFileCheckBinaryInfInFdf(self):
if EccGlobalData.gConfig.MetaDataFileCheckBinaryInfInFdf == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
@@ -1244,7 +1244,7 @@ class Check(object):
group by A.ID
""" % (Table.Table, Table.Table, Model, Model)
RecordSet = Table.Exec(SqlCommand)
for Record in RecordSet:
for Record in RecordSet:
if not EccGlobalData.gException.IsException(ErrorID, Record[2]):
EccGlobalData.gDb.TblReport.Insert(ErrorID, OtherMsg="The %s value [%s] is used more than one time" % (Name.upper(), Record[2]), BelongsToTable=Table.Table, BelongsToItem=Record[0])


@@ -1,7 +1,7 @@
## @file
# fragments of source file
#
# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
@@ -161,5 +161,5 @@ class FunctionCalling:
self.FuncName = Name
self.ParamList = Param
self.StartPos = Begin
self.EndPos = End
self.EndPos = End


@@ -47,7 +47,7 @@ from ParserWarning import Warning
T_CHAR_BACKSLASH, T_CHAR_DOUBLE_QUOTE, T_CHAR_SINGLE_QUOTE, T_CHAR_STAR, T_CHAR_HASH) = \
(' ', '\0', '\r', '\t', '\n', '/', '\\', '\"', '\'', '*', '#')
SEPERATOR_TUPLE = ('=', '|', ',', '{', '}')
SEPERATOR_TUPLE = ('=', '|', ',', '{', '}')
(T_COMMENT_TWO_SLASH, T_COMMENT_SLASH_STAR) = (0, 1)
@@ -59,7 +59,7 @@ SEPERATOR_TUPLE = ('=', '|', ',', '{', '}')
#
# GetNext*** procedures mean these procedures will get next token first, then make judgement.
# Get*** procedures mean these procedures will make judgement on current token only.
#
#
class CodeFragmentCollector:
## The constructor
#
@@ -89,7 +89,7 @@ class CodeFragmentCollector:
SizeOfLastLine = NumberOfLines
if NumberOfLines > 0:
SizeOfLastLine = len(self.Profile.FileLinesList[-1])
if self.CurrentLineNumber == NumberOfLines and self.CurrentOffsetWithinLine >= SizeOfLastLine - 1:
return True
elif self.CurrentLineNumber > NumberOfLines:
@@ -111,7 +111,7 @@ class CodeFragmentCollector:
return True
else:
return False
## Rewind() method
#
# Reset file data buffer to the initial state
@@ -121,7 +121,7 @@ class CodeFragmentCollector:
def Rewind(self):
self.CurrentLineNumber = 1
self.CurrentOffsetWithinLine = 0
## __UndoOneChar() method
#
# Go back one char in the file buffer
@@ -129,9 +129,9 @@ class CodeFragmentCollector:
# @param self The object pointer
# @retval True Successfully go back one char
# @retval False Not able to go back one char as file beginning reached
#
#
def __UndoOneChar(self):
if self.CurrentLineNumber == 1 and self.CurrentOffsetWithinLine == 0:
return False
elif self.CurrentOffsetWithinLine == 0:
@@ -140,13 +140,13 @@ class CodeFragmentCollector:
else:
self.CurrentOffsetWithinLine -= 1
return True
## __GetOneChar() method
#
# Move forward one char in the file buffer
#
# @param self The object pointer
#
#
def __GetOneChar(self):
if self.CurrentOffsetWithinLine == len(self.Profile.FileLinesList[self.CurrentLineNumber - 1]) - 1:
self.CurrentLineNumber += 1
@@ -160,13 +160,13 @@ class CodeFragmentCollector:
#
# @param self The object pointer
# @retval Char Current char
#
#
def __CurrentChar(self):
CurrentChar = self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine]
# if CurrentChar > 255:
# raise Warning("Non-Ascii char found At Line %d, offset %d" % (self.CurrentLineNumber, self.CurrentOffsetWithinLine), self.FileName, self.CurrentLineNumber)
return CurrentChar
## __NextChar() method
#
# Get the one char pass the char pointed to by the file buffer pointer
@@ -179,7 +179,7 @@ class CodeFragmentCollector:
return self.Profile.FileLinesList[self.CurrentLineNumber][0]
else:
return self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine + 1]
## __SetCurrentCharValue() method
#
# Modify the value of current char
@@ -189,7 +189,7 @@ class CodeFragmentCollector:
#
def __SetCurrentCharValue(self, Value):
self.Profile.FileLinesList[self.CurrentLineNumber - 1][self.CurrentOffsetWithinLine] = Value
## __SetCharValue() method
#
# Modify the value of current char
@@ -199,7 +199,7 @@ class CodeFragmentCollector:
#
def __SetCharValue(self, Line, Offset, Value):
self.Profile.FileLinesList[Line - 1][Offset] = Value
## __CurrentLine() method
#
# Get the list that contains current line contents
@@ -209,7 +209,7 @@ class CodeFragmentCollector:
#
def __CurrentLine(self):
return self.Profile.FileLinesList[self.CurrentLineNumber - 1]
## __InsertComma() method
#
# Insert ',' to replace PP
@@ -218,24 +218,24 @@ class CodeFragmentCollector:
# @retval List current line contents
#
def __InsertComma(self, Line):
if self.Profile.FileLinesList[Line - 1][0] != T_CHAR_HASH:
BeforeHashPart = str(self.Profile.FileLinesList[Line - 1]).split(T_CHAR_HASH)[0]
if BeforeHashPart.rstrip().endswith(T_CHAR_COMMA) or BeforeHashPart.rstrip().endswith(';'):
return
if Line - 2 >= 0 and str(self.Profile.FileLinesList[Line - 2]).rstrip().endswith(','):
return
if Line - 2 >= 0 and str(self.Profile.FileLinesList[Line - 2]).rstrip().endswith(';'):
return
if str(self.Profile.FileLinesList[Line]).lstrip().startswith(',') or str(self.Profile.FileLinesList[Line]).lstrip().startswith(';'):
return
self.Profile.FileLinesList[Line - 1].insert(self.CurrentOffsetWithinLine, ',')
## PreprocessFile() method
#
# Preprocess file contents, replace comments with spaces.
@@ -244,7 +244,7 @@ class CodeFragmentCollector:
# !include statement should be expanded at the same FileLinesList[CurrentLineNumber - 1]
#
# @param self The object pointer
#
#
def PreprocessFile(self):
self.Rewind()
@@ -256,14 +256,14 @@ class CodeFragmentCollector:
PPDirectiveObj = None
# HashComment in quoted string " " is ignored.
InString = False
InCharLiteral = False
InCharLiteral = False
self.Profile.FileLinesList = [list(s) for s in self.Profile.FileLinesListFromFile]
while not self.__EndOfFile():
if not InComment and self.__CurrentChar() == T_CHAR_DOUBLE_QUOTE:
InString = not InString
if not InComment and self.__CurrentChar() == T_CHAR_SINGLE_QUOTE:
InCharLiteral = not InCharLiteral
# meet new line, then no longer in a comment for // and '#'
@@ -274,9 +274,9 @@ class CodeFragmentCollector:
PPExtend = True
else:
PPExtend = False
EndLinePos = (self.CurrentLineNumber, self.CurrentOffsetWithinLine)
if InComment and DoubleSlashComment:
InComment = False
DoubleSlashComment = False
@@ -291,17 +291,17 @@ class CodeFragmentCollector:
PPDirectiveObj.EndPos = EndLinePos
FileProfile.PPDirectiveList.append(PPDirectiveObj)
PPDirectiveObj = None
if InString or InCharLiteral:
CurrentLine = "".join(self.__CurrentLine())
if CurrentLine.rstrip(T_CHAR_LF).rstrip(T_CHAR_CR).endswith(T_CHAR_BACKSLASH):
SlashIndex = CurrentLine.rindex(T_CHAR_BACKSLASH)
self.__SetCharValue(self.CurrentLineNumber, SlashIndex, T_CHAR_SPACE)
if InComment and not DoubleSlashComment and not HashComment:
CommentObj.Content += T_CHAR_LF
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
self.CurrentOffsetWithinLine = 0
# check for */ comment end
elif InComment and not DoubleSlashComment and not HashComment and self.__CurrentChar() == T_CHAR_STAR and self.__NextChar() == T_CHAR_SLASH:
CommentObj.Content += self.__CurrentChar()
@@ -315,7 +315,7 @@ class CodeFragmentCollector:
self.__GetOneChar()
InComment = False
# set comments to spaces
elif InComment:
elif InComment:
if HashComment:
# // follows hash PP directive
if self.__CurrentChar() == T_CHAR_SLASH and self.__NextChar() == T_CHAR_SLASH:
@@ -341,7 +341,7 @@ class CodeFragmentCollector:
# check for '#' comment
elif self.__CurrentChar() == T_CHAR_HASH and not InString and not InCharLiteral:
InComment = True
HashComment = True
HashComment = True
PPDirectiveObj = PP_Directive('', (self.CurrentLineNumber, self.CurrentOffsetWithinLine), None)
# check for /* comment start
elif self.__CurrentChar() == T_CHAR_SLASH and self.__NextChar() == T_CHAR_STAR:
@@ -355,9 +355,9 @@ class CodeFragmentCollector:
InComment = True
else:
self.__GetOneChar()
EndLinePos = (self.CurrentLineNumber, self.CurrentOffsetWithinLine)
if InComment and DoubleSlashComment:
CommentObj.EndPos = EndLinePos
FileProfile.CommentList.append(CommentObj)
@@ -378,14 +378,14 @@ class CodeFragmentCollector:
PPDirectiveObj = None
# HashComment in quoted string " " is ignored.
InString = False
InCharLiteral = False
InCharLiteral = False
self.Profile.FileLinesList = [list(s) for s in self.Profile.FileLinesListFromFile]
while not self.__EndOfFile():
if not InComment and self.__CurrentChar() == T_CHAR_DOUBLE_QUOTE:
InString = not InString
if not InComment and self.__CurrentChar() == T_CHAR_SINGLE_QUOTE:
InCharLiteral = not InCharLiteral
# meet new line, then no longer in a comment for // and '#'
@@ -396,9 +396,9 @@ class CodeFragmentCollector:
PPExtend = True
else:
PPExtend = False
EndLinePos = (self.CurrentLineNumber, self.CurrentOffsetWithinLine)
if InComment and DoubleSlashComment:
InComment = False
DoubleSlashComment = False
@@ -413,17 +413,17 @@ class CodeFragmentCollector:
PPDirectiveObj.EndPos = EndLinePos
FileProfile.PPDirectiveList.append(PPDirectiveObj)
PPDirectiveObj = None
if InString or InCharLiteral:
CurrentLine = "".join(self.__CurrentLine())
if CurrentLine.rstrip(T_CHAR_LF).rstrip(T_CHAR_CR).endswith(T_CHAR_BACKSLASH):
SlashIndex = CurrentLine.rindex(T_CHAR_BACKSLASH)
self.__SetCharValue(self.CurrentLineNumber, SlashIndex, T_CHAR_SPACE)
if InComment and not DoubleSlashComment and not HashComment:
CommentObj.Content += T_CHAR_LF
self.CurrentLineNumber += 1
self.CurrentOffsetWithinLine = 0
self.CurrentOffsetWithinLine = 0
# check for */ comment end
elif InComment and not DoubleSlashComment and not HashComment and self.__CurrentChar() == T_CHAR_STAR and self.__NextChar() == T_CHAR_SLASH:
CommentObj.Content += self.__CurrentChar()
@@ -437,7 +437,7 @@ class CodeFragmentCollector:
self.__GetOneChar()
InComment = False
# set comments to spaces
elif InComment:
elif InComment:
if HashComment:
# // follows hash PP directive
if self.__CurrentChar() == T_CHAR_SLASH and self.__NextChar() == T_CHAR_SLASH:
@@ -463,7 +463,7 @@ class CodeFragmentCollector:
# check for '#' comment
elif self.__CurrentChar() == T_CHAR_HASH and not InString and not InCharLiteral:
InComment = True
HashComment = True
HashComment = True
PPDirectiveObj = PP_Directive('', (self.CurrentLineNumber, self.CurrentOffsetWithinLine), None)
# check for /* comment start
elif self.__CurrentChar() == T_CHAR_SLASH and self.__NextChar() == T_CHAR_STAR:
@@ -479,7 +479,7 @@ class CodeFragmentCollector:
self.__GetOneChar()
EndLinePos = (self.CurrentLineNumber, self.CurrentOffsetWithinLine)
if InComment and DoubleSlashComment:
CommentObj.EndPos = EndLinePos
FileProfile.CommentList.append(CommentObj)
@@ -507,7 +507,7 @@ class CodeFragmentCollector:
tStream = antlr3.CommonTokenStream(lexer)
parser = CParser(tStream)
parser.translation_unit()
def ParseFileWithClearedPPDirective(self):
self.PreprocessFileWithClear()
# restore from ListOfList to ListOfString
@@ -520,7 +520,7 @@ class CodeFragmentCollector:
tStream = antlr3.CommonTokenStream(lexer)
parser = CParser(tStream)
parser.translation_unit()
def CleanFileProfileBuffer(self):
FileProfile.CommentList = []
FileProfile.PPDirectiveList = []
@@ -531,61 +531,61 @@ class CodeFragmentCollector:
FileProfile.StructUnionDefinitionList = []
FileProfile.TypedefDefinitionList = []
FileProfile.FunctionCallingList = []
def PrintFragments(self):
print('################# ' + self.FileName + '#####################')
print('/****************************************/')
print('/*************** COMMENTS ***************/')
print('/****************************************/')
for comment in FileProfile.CommentList:
print(str(comment.StartPos) + comment.Content)
print('/****************************************/')
print('/********* PREPROCESS DIRECTIVES ********/')
print('/****************************************/')
for pp in FileProfile.PPDirectiveList:
print(str(pp.StartPos) + pp.Content)
print('/****************************************/')
print('/********* VARIABLE DECLARATIONS ********/')
print('/****************************************/')
for var in FileProfile.VariableDeclarationList:
print(str(var.StartPos) + var.Modifier + ' '+ var.Declarator)
print('/****************************************/')
print('/********* FUNCTION DEFINITIONS *********/')
print('/****************************************/')
for func in FileProfile.FunctionDefinitionList:
print(str(func.StartPos) + func.Modifier + ' '+ func.Declarator + ' ' + str(func.NamePos))
print('/****************************************/')
print('/************ ENUMERATIONS **************/')
print('/****************************************/')
for enum in FileProfile.EnumerationDefinitionList:
print(str(enum.StartPos) + enum.Content)
print('/****************************************/')
print('/*********** STRUCTS/UNIONS *************/')
print('/****************************************/')
for su in FileProfile.StructUnionDefinitionList:
print(str(su.StartPos) + su.Content)
print('/****************************************/')
print('/********* PREDICATE EXPRESSIONS ********/')
print('/****************************************/')
for predexp in FileProfile.PredicateExpressionList:
print(str(predexp.StartPos) + predexp.Content)
print('/****************************************/')
print('/************** TYPEDEFS ****************/')
print('/****************************************/')
for typedef in FileProfile.TypedefDefinitionList:
print(str(typedef.StartPos) + typedef.ToType)
if __name__ == "__main__":
collector = CodeFragmentCollector(sys.argv[1])
collector.PreprocessFile()
print("For Test.")


@@ -215,7 +215,7 @@ class Configuration(object):
self.HeaderCheckCFileCommentReferenceFormat = 1
# Check whether C File header Comment have the License immediately after the ""Copyright"" line
self.HeaderCheckCFileCommentLicenseFormat = 1
## C Function Layout Checking
self.CFunctionLayoutCheckAll = 0
@@ -352,7 +352,7 @@ class Configuration(object):
self.MetaDataFileCheckModuleFilePpiFormat = 1
# Check Pcd Format in INF files
self.MetaDataFileCheckModuleFilePcdFormat = 1
# Check UNI file
self.UniCheckAll = 0
# Check INF or DEC file whether defined the localized information in the associated UNI file.
@@ -374,16 +374,16 @@ class Configuration(object):
# The directory listed here will not be parsed, split with ','
self.SkipDirList = []
# The file listed here will not be parsed, split with ','
self.SkipFileList = []
# A list for binary file ext name
self.BinaryExtList = []
# A list for only scanned folders
self.ScanOnlyDirList = []
# A list for Copyright format
self.Copyright = []


@@ -1,7 +1,7 @@
## @file
# This file is used to be the main entrance of ECC tool
#
# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -51,7 +51,7 @@ class Ecc(object):
# Version and Copyright
self.VersionNumber = ("1.0" + " Build " + gBUILD_VERSION)
self.Version = "%prog Version " + self.VersionNumber
self.Copyright = "Copyright (c) 2009 - 2016, Intel Corporation All rights reserved."
self.Copyright = "Copyright (c) 2009 - 2018, Intel Corporation All rights reserved."
self.InitDefaultConfigIni()
self.OutputFile = 'output.txt'
@@ -66,17 +66,17 @@ class Ecc(object):
# Parse the options and args
self.ParseOption()
EdkLogger.info(time.strftime("%H:%M:%S, %b.%d %Y ", time.localtime()) + "[00:00]" + "\n")
#
# Check EFI_SOURCE (Edk build convention). EDK_SOURCE will always point to ECP
#
WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
os.environ["WORKSPACE"] = WorkspaceDir
# set multiple workspace
PackagesPath = os.getenv("PACKAGES_PATH")
mws.setWs(WorkspaceDir, PackagesPath)
if "ECP_SOURCE" not in os.environ:
os.environ["ECP_SOURCE"] = mws.join(WorkspaceDir, GlobalData.gEdkCompatibilityPkg)
if "EFI_SOURCE" not in os.environ:
@@ -90,11 +90,11 @@ class Ecc(object):
EfiSourceDir = os.path.normcase(os.path.normpath(os.environ["EFI_SOURCE"]))
EdkSourceDir = os.path.normcase(os.path.normpath(os.environ["EDK_SOURCE"]))
EcpSourceDir = os.path.normcase(os.path.normpath(os.environ["ECP_SOURCE"]))
os.environ["EFI_SOURCE"] = EfiSourceDir
os.environ["EDK_SOURCE"] = EdkSourceDir
os.environ["ECP_SOURCE"] = EcpSourceDir
GlobalData.gWorkspace = WorkspaceDir
GlobalData.gEfiSource = EfiSourceDir
GlobalData.gEdkSource = EdkSourceDir
@@ -104,7 +104,7 @@ class Ecc(object):
GlobalData.gGlobalDefines["EFI_SOURCE"] = EfiSourceDir
GlobalData.gGlobalDefines["EDK_SOURCE"] = EdkSourceDir
GlobalData.gGlobalDefines["ECP_SOURCE"] = EcpSourceDir
EdkLogger.info("Loading ECC configuration ... done")
# Generate checkpoints list
EccGlobalData.gConfig = Configuration(self.ConfigFile)
@@ -120,11 +120,11 @@ class Ecc(object):
# Get files real name in workspace dir
#
GlobalData.gAllFiles = DirCache(GlobalData.gWorkspace)
# Build ECC database
# self.BuildDatabase()
self.DetectOnlyScanDirs()
# Start to check
self.Check()
@@ -160,8 +160,8 @@ class Ecc(object):
EdkLogger.error("ECC", BuildToolError.OPTION_VALUE_INVALID, ExtraData="Use -f option need to fill specific folders in config.ini file")
else:
self.BuildDatabase()
## BuildDatabase
#
# Build the database for target
@@ -172,7 +172,7 @@ class Ecc(object):
EccGlobalData.gDb.TblReport.Create()
# Build database
if self.IsInit:
if self.IsInit:
if self.ScanMetaData:
EdkLogger.quiet("Building database for Meta Data File ...")
self.BuildMetaDataFileDatabase(SpeciDirs)
@@ -198,7 +198,7 @@ class Ecc(object):
if SpecificDirs is None:
ScanFolders.append(EccGlobalData.gTarget)
else:
for specificDir in SpecificDirs:
for specificDir in SpecificDirs:
ScanFolders.append(os.path.join(EccGlobalData.gTarget, specificDir))
EdkLogger.quiet("Building database for meta data files ...")
Op = open(EccGlobalData.gConfig.MetaDataFileCheckPathOfGenerateFileList, 'w+')
@@ -219,7 +219,7 @@ class Ecc(object):
# symlinks to directories are treated as directories
Dirs.remove(Dir)
Dirs.append(Dirname)
for File in Files:
if len(File) > 4 and File[-4:].upper() == ".DEC":
Filename = os.path.normpath(os.path.join(Root, File))


@@ -1,7 +1,7 @@
## @file
# This file is used to save global datas used by ECC tool
#
# Copyright (c) 2008 - 2014, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -24,4 +24,4 @@ gIdentifierTableList = []
gCFileList = []
gHFileList = []
gUFileList = []
gException = None
gException = None


@@ -1,7 +1,7 @@
## @file
# This file is used to parse exception items found by ECC tool
#
# Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -24,12 +24,12 @@ class ExceptionXml(object):
self.KeyWord = ''
self.ErrorID = ''
self.FilePath = ''
def FromXml(self, Item, Key):
self.KeyWord = XmlElement(Item, '%s/KeyWord' % Key)
self.ErrorID = XmlElement(Item, '%s/ErrorID' % Key)
self.FilePath = os.path.normpath(XmlElement(Item, '%s/FilePath' % Key))
def __str__(self):
return 'ErrorID = %s KeyWord = %s FilePath = %s' %(self.ErrorID, self.KeyWord, self.FilePath)
@@ -37,22 +37,22 @@ class ExceptionXml(object):
class ExceptionListXml(object):
def __init__(self):
self.List = []
def FromXmlFile(self, FilePath):
XmlContent = XmlParseFile(FilePath)
for Item in XmlList(XmlContent, '/ExceptionList/Exception'):
Exp = ExceptionXml()
Exp.FromXml(Item, 'Exception')
self.List.append(Exp)
def ToList(self):
RtnList = []
for Item in self.List:
#RtnList.append((Item.ErrorID, Item.KeyWord, Item.FilePath))
RtnList.append((Item.ErrorID, Item.KeyWord))
return RtnList
def __str__(self):
RtnStr = ''
if self.List:
@@ -71,7 +71,7 @@ class ExceptionCheck(object):
if FilePath and os.path.isfile(FilePath):
self.ExceptionListXml.FromXmlFile(FilePath)
self.ExceptionList = self.ExceptionListXml.ToList()
def IsException(self, ErrorID, KeyWord, FileID=-1):
if (str(ErrorID), KeyWord.replace('\r\n', '\n')) in self.ExceptionList:
return True


@@ -1,7 +1,7 @@
## @file
# fragments of source file
#
# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
@@ -36,7 +36,7 @@ FunctionCallingList = []
# May raise Exception when opening file.
#
class FileProfile :
## The constructor
#
# @param self The object pointer
@@ -54,5 +54,5 @@ class FileProfile :
except IOError:
raise Warning("Error when opening file %s" % FileName)


@@ -1,7 +1,7 @@
## @file
# This file is used to define common parser functions for meta-data
#
# Copyright (c) 2008 - 2014, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -87,16 +87,16 @@ def GetTableList(FileModelList, Table, Db):
# @param FileName: FileName of the comment
#
def ParseHeaderCommentSection(CommentList, FileName = None):
Abstract = ''
Description = ''
Copyright = ''
License = ''
EndOfLine = "\n"
STR_HEADER_COMMENT_START = "@file"
#
# used to indicate the state of processing header comment section of dec,
# used to indicate the state of processing header comment section of dec,
# inf files
#
HEADER_COMMENT_NOT_STARTED = -1
@@ -117,11 +117,11 @@ def ParseHeaderCommentSection(CommentList, FileName = None):
if _IsCopyrightLine(Line):
Last = Index
break
for Item in CommentList:
Line = Item[0]
LineNo = Item[1]
if not Line.startswith('#') and Line:
SqlStatement = """ select ID from File where FullPath like '%s'""" % FileName
ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)
@@ -131,14 +131,14 @@ def ParseHeaderCommentSection(CommentList, FileName = None):
Comment = CleanString2(Line)[1]
Comment = Comment.strip()
#
# if there are blank lines between License or Description, keep them as they would be
# if there are blank lines between License or Description, keep them as they would be
# indication of different block; or in the position that Abstract should be, also keep it
# as it indicates that no abstract
#
if not Comment and HeaderCommentStage not in [HEADER_COMMENT_LICENSE, \
HEADER_COMMENT_DESCRIPTION, HEADER_COMMENT_ABSTRACT]:
continue
if HeaderCommentStage == HEADER_COMMENT_NOT_STARTED:
if Comment.startswith(STR_HEADER_COMMENT_START):
HeaderCommentStage = HEADER_COMMENT_ABSTRACT
@@ -152,39 +152,39 @@ def ParseHeaderCommentSection(CommentList, FileName = None):
if not Comment:
Abstract = ''
HeaderCommentStage = HEADER_COMMENT_DESCRIPTION
elif _IsCopyrightLine(Comment):
elif _IsCopyrightLine(Comment):
Copyright += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_COPYRIGHT
else:
else:
Abstract += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_DESCRIPTION
elif HeaderCommentStage == HEADER_COMMENT_DESCRIPTION:
#
# in case there is no description
#
if _IsCopyrightLine(Comment):
#
if _IsCopyrightLine(Comment):
Copyright += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_COPYRIGHT
else:
Description += Comment + EndOfLine
Description += Comment + EndOfLine
elif HeaderCommentStage == HEADER_COMMENT_COPYRIGHT:
if _IsCopyrightLine(Comment):
if _IsCopyrightLine(Comment):
Copyright += Comment + EndOfLine
else:
#
# Contents after copyright line are license, those non-copyright lines in between
# copyright line will be discarded
# copyright line will be discarded
#
if LineNo > Last:
if License:
License += EndOfLine
License += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_LICENSE
HeaderCommentStage = HEADER_COMMENT_LICENSE
else:
if not Comment and not License:
continue
License += Comment + EndOfLine
if not Copyright.strip():
SqlStatement = """ select ID from File where FullPath like '%s'""" % FileName
ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)
@@ -198,19 +198,19 @@ def ParseHeaderCommentSection(CommentList, FileName = None):
for Result in ResultSet:
Msg = 'Header comment section must have license information'
EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, "File", Result[0])
if not Abstract.strip() or Abstract.find('Component description file') > -1:
SqlStatement = """ select ID from File where FullPath like '%s'""" % FileName
ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)
for Result in ResultSet:
Msg = 'Header comment section must have Abstract information.'
EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, "File", Result[0])
return Abstract.strip(), Description.strip(), Copyright.strip(), License.strip()
## _IsCopyrightLine
# check whether current line is copyright line, the criteria is whether there is case insensitive keyword "Copyright"
# followed by zero or more white space characters followed by a "(" character
# check whether current line is copyright line, the criteria is whether there is case insensitive keyword "Copyright"
# followed by zero or more white space characters followed by a "(" character
#
# @param LineContent: the line need to be checked
# @return: True if current line is copyright line, False else
@@ -218,11 +218,11 @@ def ParseHeaderCommentSection(CommentList, FileName = None):
def _IsCopyrightLine (LineContent):
LineContent = LineContent.upper()
Result = False
ReIsCopyrightRe = re.compile(r"""(^|\s)COPYRIGHT *\(""", re.DOTALL)
if ReIsCopyrightRe.search(LineContent):
Result = True
return Result
@@ -232,7 +232,7 @@ def _IsCopyrightLine (LineContent):
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
# @param CommentCharacter: Comment char, used to ignore comment content,
# default is DataType.TAB_COMMENT_SPLIT
#
def CleanString2(Line, CommentCharacter='#', AllowCppStyleComment=False):


@@ -92,7 +92,7 @@ def ParseMacro(Parser):
elif (Name in self._FileLocalMacros) and (self._FileLocalMacros[Name] != Value):
EdkLogger.error('Parser', FORMAT_INVALID, "EDK_GLOBAL defined a macro with the same name and different value as one defined by 'DEFINE'",
ExtraData=self._CurrentLine, File=self.MetaFile, Line=self._LineIndex+1)
self._ValueList = [Type, Name, Value]
return MacroParser
@@ -334,7 +334,7 @@ class MetaFileParser(object):
self._ValueList = [ReplaceMacro(Value, self._Macros) for Value in self._ValueList]
Name, Value = self._ValueList[1], self._ValueList[2]
# Sometimes, we need to make differences between EDK and EDK2 modules
# Sometimes, we need to make differences between EDK and EDK2 modules
if Name == 'INF_VERSION':
try:
self._Version = int(Value, 0)
@@ -354,7 +354,7 @@ class MetaFileParser(object):
UniFile = os.path.join(os.path.dirname(self.MetaFile), Value)
if os.path.exists(UniFile):
self._UniObj = UniParser(UniFile, IsExtraUni=False, IsModuleUni=False)
if isinstance(self, InfParser) and self._Version < 0x00010005:
# EDK module allows using defines as macros
self._FileLocalMacros[Name] = Value
@@ -390,7 +390,7 @@ class MetaFileParser(object):
return Macros
## Get section Macros that are applicable to current line, which may come from other sections
## Get section Macros that are applicable to current line, which may come from other sections
## that share the same name while scope is wider
def _GetApplicableSectionMacro(self):
Macros = {}
@@ -473,7 +473,7 @@ class InfParser(MetaFileParser):
self.FileID = FileID
else:
self.FileID = self.TblFile.InsertFile(Filename, MODEL_FILE_INF)
# parse the file line by line
IsFindBlockComment = False
@@ -591,7 +591,7 @@ class InfParser(MetaFileParser):
)
Usage = ''
if IsFindBlockComment:
EdkLogger.error("Parser", FORMAT_INVALID, "Open block comments (starting with /*) are expected to end with */",
EdkLogger.error("Parser", FORMAT_INVALID, "Open block comments (starting with /*) are expected to end with */",
File=self.MetaFile)
self._Done()
@@ -818,7 +818,7 @@ class DscParser(MetaFileParser):
# the owner item
#
self._IdMapping = {-1:-1}
self.TblFile = EccGlobalData.gDb.TblFile
self.FileID = -1
@@ -838,8 +838,8 @@ class DscParser(MetaFileParser):
self.FileID = FileID
else:
self.FileID = self.TblFile.InsertFile(Filename, MODEL_FILE_DSC)
for Index in range(0, len(Content)):
Line = CleanString(Content[Index])
# skip empty line
@@ -850,7 +850,7 @@ class DscParser(MetaFileParser):
self._LineIndex = Index
if self._InSubsection and self._Owner[-1] == -1:
self._Owner.append(self._LastItem)
# section header
if Line[0] == TAB_SECTION_START and Line[-1] == TAB_SECTION_END:
self._SectionType = MODEL_META_DATA_SECTION_HEADER
@@ -960,7 +960,7 @@ class DscParser(MetaFileParser):
elif self._From > 0:
EdkLogger.error('Parser', FORMAT_INVALID,
"No '!include' allowed in included file",
ExtraData=self._CurrentLine, File=self.MetaFile,
ExtraData=self._CurrentLine, File=self.MetaFile,
Line=self._LineIndex+1)
#
@@ -1154,7 +1154,7 @@ class DscParser(MetaFileParser):
MODEL_META_DATA_USER_EXTENSION : self._Skip,
MODEL_META_DATA_CONDITIONAL_STATEMENT_ERROR : self._Skip,
}
self._RawTable = self._Table
self._Table = MetaFileStorage(self._RawTable.Cur, self.MetaFile, MODEL_FILE_DSC, True)
self._DirectiveStack = []
@@ -1184,7 +1184,7 @@ class DscParser(MetaFileParser):
try:
Processer[self._ItemType]()
except EvaluationException as Excpt:
#
#
# Only catch expression evaluation error here. We need to report
# the precise number of line on which the error occurred
#
@@ -1194,11 +1194,11 @@ class DscParser(MetaFileParser):
# Line=self._LineIndex+1)
except MacroException as Excpt:
EdkLogger.error('Parser', FORMAT_INVALID, str(Excpt),
File=self._FileWithError, ExtraData=' '.join(self._ValueList),
File=self._FileWithError, ExtraData=' '.join(self._ValueList),
Line=self._LineIndex+1)
if self._ValueList is None:
continue
continue
NewOwner = self._IdMapping.get(Owner, -1)
self._Enabled = int((not self._DirectiveEvalStack) or (False not in self._DirectiveEvalStack))
@@ -1221,7 +1221,7 @@ class DscParser(MetaFileParser):
self._IdMapping[Id] = self._LastItem
RecordList = self._Table.GetAll()
self._RawTable.Drop()
self._Table.Drop()
for Record in RecordList:
@@ -1255,7 +1255,7 @@ class DscParser(MetaFileParser):
# Don't use PCD with different values.
if Name in self._Symbols and self._Symbols[Name] != Value:
self._Symbols.pop(Name)
continue
continue
self._Symbols[Name] = Value
Records = self._RawTable.Query(MODEL_PCD_FIXED_AT_BUILD, BelongsToItem=-1.0)
@@ -1263,12 +1263,12 @@ class DscParser(MetaFileParser):
Value, DatumType, MaxDatumSize = AnalyzePcdData(Value)
# Only use PCD whose value is straitforward (no macro and PCD)
if self.SymbolPattern.findall(Value):
continue
continue
Name = TokenSpaceGuid+'.'+PcdName
# Don't use PCD with different values.
if Name in self._Symbols and self._Symbols[Name] != Value:
self._Symbols.pop(Name)
continue
continue
self._Symbols[Name] = Value
def __ProcessDefine(self):
@@ -1288,13 +1288,13 @@ class DscParser(MetaFileParser):
SectionLocalMacros[Name] = Value
elif self._ItemType == MODEL_META_DATA_GLOBAL_DEFINE:
GlobalData.gEdkGlobal[Name] = Value
#
# Keyword in [Defines] section can be used as Macros
#
if (self._ItemType == MODEL_META_DATA_HEADER) and (self._SectionType == MODEL_META_DATA_HEADER):
self._FileLocalMacros[Name] = Value
self._ValueList = [Type, Name, Value]
def __ProcessDirective(self):
@@ -1309,12 +1309,12 @@ class DscParser(MetaFileParser):
EdkLogger.debug(EdkLogger.DEBUG_5, str(Exc), self._ValueList[1])
Result = False
except WrnExpression as Excpt:
#
#
# Catch expression evaluation warning here. We need to report
# the precise number of line and return the evaluation result
#
EdkLogger.warn('Parser', "Suspicious expression: %s" % str(Excpt),
File=self._FileWithError, ExtraData=' '.join(self._ValueList),
File=self._FileWithError, ExtraData=' '.join(self._ValueList),
Line=self._LineIndex+1)
Result = Excpt.result
except BadExpression as Exc:
@@ -1365,14 +1365,14 @@ class DscParser(MetaFileParser):
#
elif "ECP_SOURCE" in GlobalData.gCommandLineDefines.keys():
__IncludeMacros['ECP_SOURCE'] = GlobalData.gCommandLineDefines['ECP_SOURCE']
__IncludeMacros['EFI_SOURCE'] = GlobalData.gGlobalDefines['EFI_SOURCE']
__IncludeMacros['EDK_SOURCE'] = GlobalData.gGlobalDefines['EDK_SOURCE']
#
# Allow using MACROs comes from [Defines] section to keep compatible.
# Allow using MACROs comes from [Defines] section to keep compatible.
#
__IncludeMacros.update(self._Macros)
IncludedFile = NormPath(ReplaceMacro(self._ValueList[1], __IncludeMacros, RaiseError=True))
#
# First search the include file under the same directory as DSC file
@@ -1386,14 +1386,14 @@ class DscParser(MetaFileParser):
IncludedFile1 = PathClass(IncludedFile, GlobalData.gWorkspace)
ErrorCode, ErrorInfo2 = IncludedFile1.Validate()
if ErrorCode != 0:
EdkLogger.error('parser', ErrorCode, File=self._FileWithError,
EdkLogger.error('parser', ErrorCode, File=self._FileWithError,
Line=self._LineIndex+1, ExtraData=ErrorInfo1 + "\n"+ ErrorInfo2)
self._FileWithError = IncludedFile1
IncludedFileTable = MetaFileStorage(self._Table.Cur, IncludedFile1, MODEL_FILE_DSC, True)
Owner = self._Content[self._ContentIndex-1][0]
Parser = DscParser(IncludedFile1, self._FileType, IncludedFileTable,
Parser = DscParser(IncludedFile1, self._FileType, IncludedFileTable,
Owner=Owner, From=Owner)
# set the parser status with current status
@@ -1417,7 +1417,7 @@ class DscParser(MetaFileParser):
self._Content.pop(self._ContentIndex-1)
self._ValueList = None
self._ContentIndex -= 1
def __ProcessSkuId(self):
self._ValueList = [ReplaceMacro(Value, self._Macros, RaiseError=True)
for Value in self._ValueList]
@@ -1434,22 +1434,22 @@ class DscParser(MetaFileParser):
# PCD value can be an expression
#
if len(ValueList) > 1 and ValueList[1] == TAB_VOID:
PcdValue = ValueList[0]
PcdValue = ValueList[0]
try:
ValueList[0] = ValueExpression(PcdValue, self._Macros)(True)
except WrnExpression as Value:
ValueList[0] = Value.result
ValueList[0] = Value.result
else:
PcdValue = ValueList[-1]
try:
ValueList[-1] = ValueExpression(PcdValue, self._Macros)(True)
except WrnExpression as Value:
ValueList[-1] = Value.result
if ValueList[-1] == 'True':
ValueList[-1] = '1'
if ValueList[-1] == 'False':
ValueList[-1] = '0'
ValueList[-1] = '0'
self._ValueList[2] = '|'.join(ValueList)
@@ -1548,7 +1548,7 @@ class DecParser(MetaFileParser):
self.FileID = FileID
else:
self.FileID = self.TblFile.InsertFile(Filename, MODEL_FILE_DEC)
for Index in range(0, len(Content)):
Line, Comment = CleanString2(Content[Index])
self._CurrentLine = Line
@@ -1750,19 +1750,19 @@ class DecParser(MetaFileParser):
" (<TokenSpaceGuidCName>.<PcdCName>|<DefaultValue>|<DatumType>|<Token>)",
File=self.MetaFile, Line=self._LineIndex+1)
ValueRe = re.compile(r'^\s*L?\".*\|.*\"')
PtrValue = ValueRe.findall(TokenList[1])
# Has VOID* type string, may contain "|" character in the string.
# Has VOID* type string, may contain "|" character in the string.
if len(PtrValue) != 0:
ptrValueList = re.sub(ValueRe, '', TokenList[1])
ValueList = GetSplitValueList(ptrValueList)
ValueList[0] = PtrValue[0]
else:
ValueList = GetSplitValueList(TokenList[1])
# check if there's enough datum information given
if len(ValueList) != 3:
EdkLogger.error('Parser', FORMAT_INVALID, "Invalid PCD Datum information given",
@@ -1792,7 +1792,7 @@ class DecParser(MetaFileParser):
if not IsValid:
EdkLogger.error('Parser', FORMAT_INVALID, Cause, ExtraData=self._CurrentLine,
File=self.MetaFile, Line=self._LineIndex+1)
if EccGlobalData.gConfig.UniCheckPCDInfo == '1' or EccGlobalData.gConfig.UniCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
# check Description, Prompt information
PatternDesc = re.compile('##\s*([\x21-\x7E\s]*)', re.S)
@@ -1903,7 +1903,7 @@ class DecParser(MetaFileParser):
## Fdf
#
# This class defined the structure used in Fdf object
#
#
# @param Filename: Input value for Ffilename of Fdf file, default is None
# @param WorkspaceDir: Input value for current workspace directory, default is None
#
@@ -1911,7 +1911,7 @@ class Fdf(object):
def __init__(self, Filename = None, IsToDatabase = False, WorkspaceDir = None, Database = None):
self.WorkspaceDir = WorkspaceDir
self.IsToDatabase = IsToDatabase
self.Cur = Database.Cur
self.TblFile = Database.TblFile
self.TblFdf = Database.TblFdf
@@ -1938,15 +1938,15 @@ class Fdf(object):
self.FileList[Filename] = FileID
return self.FileList[Filename]
## Load Fdf file
#
# Load the file if it exists
#
# @param Filename: Input value for filename of Fdf file
#
def LoadFdfFile(self, Filename):
def LoadFdfFile(self, Filename):
FileList = []
#
# Parse Fdf file
@@ -1991,7 +1991,7 @@ class UniParser(object):
self.FileIn = None
self.Missing = []
self.__read()
def __read(self):
try:
self.FileIn = CodecOpenLongFilePath(self.FilePath, Mode='rb', Encoding='utf_8').read()
@@ -2001,7 +2001,7 @@ class UniParser(object):
self.FileIn = CodecOpenLongFilePath(self.FilePath, Mode='rb', Encoding='utf_16_le').read()
except IOError:
self.FileIn = ""
def Start(self):
if self.IsModuleUni:
if self.IsExtraUni:
@@ -2021,7 +2021,7 @@ class UniParser(object):
self.PrintLog('STR_PACKAGE_ABSTRACT', PackageAbstract)
PackageDescription = self.CheckKeyValid('STR_PACKAGE_DESCRIPTION')
self.PrintLog('STR_PACKAGE_DESCRIPTION', PackageDescription)
def CheckKeyValid(self, Key, Contents=None):
if not Contents:
Contents = self.FileIn
@@ -2029,7 +2029,7 @@ class UniParser(object):
if KeyPattern.search(Contents):
return True
return False
def CheckPcdInfo(self, PcdCName):
PromptKey = 'STR_%s_PROMPT' % PcdCName.replace('.', '_')
PcdPrompt = self.CheckKeyValid(PromptKey)
@@ -2037,7 +2037,7 @@ class UniParser(object):
HelpKey = 'STR_%s_HELP' % PcdCName.replace('.', '_')
PcdHelp = self.CheckKeyValid(HelpKey)
self.PrintLog(HelpKey, PcdHelp)
def PrintLog(self, Key, Value):
if not Value and Key not in self.Missing:
Msg = '%s is missing in the %s file.' % (Key, self.FileName)


@@ -1,7 +1,7 @@
## @file
# This file is used to create/update/query/erase a meta file table
#
# Copyright (c) 2008, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -25,7 +25,7 @@ from CommonDataClass.DataClass import MODEL_FILE_DSC, MODEL_FILE_DEC, MODEL_FILE
MODEL_FILE_OTHERS
class MetaFileTable(Table):
## Constructor
## Constructor
def __init__(self, Cursor, MetaFile, FileType, TableName, Temporary = False):
self.MetaFile = MetaFile
self.TblFile = EccGlobalData.gDb.TblFile
@@ -88,30 +88,30 @@ class ModuleTable(MetaFileTable):
BelongsToItem=-1, BelongsToFile = -1, StartLine=-1, StartColumn=-1, EndLine=-1, EndColumn=-1, Enabled=0, Usage=''):
(Value1, Value2, Value3, Usage, Scope1, Scope2) = ConvertToSqlString((Value1, Value2, Value3, Usage, Scope1, Scope2))
return Table.Insert(
self,
Model,
Value1,
Value2,
Value3,
Usage,
Scope1,
self,
Model,
Value1,
Value2,
Value3,
Usage,
Scope1,
Scope2,
BelongsToItem,
BelongsToFile,
StartLine,
StartColumn,
EndLine,
EndColumn,
BelongsToFile,
StartLine,
StartColumn,
EndLine,
EndColumn,
Enabled
)
## Query table
#
# @param Model: The Model of Record
# @param Arch: The Arch attribute of Record
# @param Platform The Platform attribute of Record
# @param Model: The Model of Record
# @param Arch: The Arch attribute of Record
# @param Platform The Platform attribute of Record
#
# @retval: A recordSet of all found records
# @retval: A recordSet of all found records
#
def Query(self, Model, Arch=None, Platform=None):
ConditionString = "Model=%s AND Enabled>=0" % Model
@@ -171,28 +171,28 @@ class PackageTable(MetaFileTable):
BelongsToItem=-1, BelongsToFile = -1, StartLine=-1, StartColumn=-1, EndLine=-1, EndColumn=-1, Enabled=0):
(Value1, Value2, Value3, Scope1, Scope2) = ConvertToSqlString((Value1, Value2, Value3, Scope1, Scope2))
return Table.Insert(
self,
Model,
Value1,
Value2,
Value3,
Scope1,
self,
Model,
Value1,
Value2,
Value3,
Scope1,
Scope2,
BelongsToItem,
BelongsToFile,
StartLine,
StartColumn,
EndLine,
EndColumn,
BelongsToFile,
StartLine,
StartColumn,
EndLine,
EndColumn,
Enabled
)
## Query table
#
# @param Model: The Model of Record
# @param Arch: The Arch attribute of Record
# @param Model: The Model of Record
# @param Arch: The Arch attribute of Record
#
# @retval: A recordSet of all found records
# @retval: A recordSet of all found records
#
def Query(self, Model, Arch=None):
ConditionString = "Model=%s AND Enabled>=0" % Model
@@ -252,32 +252,32 @@ class PlatformTable(MetaFileTable):
FromItem=-1, StartLine=-1, StartColumn=-1, EndLine=-1, EndColumn=-1, Enabled=1):
(Value1, Value2, Value3, Scope1, Scope2) = ConvertToSqlString((Value1, Value2, Value3, Scope1, Scope2))
return Table.Insert(
self,
Model,
Value1,
Value2,
Value3,
Scope1,
self,
Model,
Value1,
Value2,
Value3,
Scope1,
Scope2,
BelongsToItem,
BelongsToItem,
BelongsToFile,
FromItem,
StartLine,
StartColumn,
EndLine,
EndColumn,
StartLine,
StartColumn,
EndLine,
EndColumn,
Enabled
)
## Query table
#
# @param Model: The Model of Record
# @param Model: The Model of Record
# @param Scope1: Arch of a Dsc item
# @param Scope2: Module type of a Dsc item
# @param BelongsToItem: The item belongs to which another item
# @param FromItem: The item belongs to which dsc file
#
# @retval: A recordSet of all found records
# @retval: A recordSet of all found records
#
def Query(self, Model, Scope1=None, Scope2=None, BelongsToItem=None, FromItem=None):
ConditionString = "Model=%s AND Enabled>0" % Model


@@ -1,7 +1,7 @@
## @file
# This file is used to be the warning class of ECC tool
#
# Copyright (c) 2009 - 2010, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -27,4 +27,4 @@ class Warning (Exception):
self.message = Str
self.FileName = File
self.LineNumber = Line
self.ToolName = 'ECC PP'
self.ToolName = 'ECC PP'


@@ -2,7 +2,7 @@
# This is an XML API that uses a syntax similar to XPath, but it is written in
# standard python so that no extra python packages are required to use it.
#
# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -33,7 +33,7 @@ def CreateXmlElement(Name, String, NodeList, AttributeList):
Element = Doc.createElement(Name)
if String != '' and String is not None:
Element.appendChild(Doc.createTextNode(String))
for Item in NodeList:
if isinstance(Item, type([])):
Key = Item[0]
@@ -49,7 +49,7 @@ def CreateXmlElement(Name, String, NodeList, AttributeList):
Value = Item[1]
if Key != '' and Key is not None and Value != '' and Value is not None:
Element.setAttribute(Key, Value)
return Element
## Get a list of XML nodes using XPath style syntax.


@@ -4,11 +4,11 @@
# This file is required to make Python interpreter treat the directory
# as containing package.
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
@@ -17,4 +17,4 @@
'''
Xml
'''
'''


@@ -2349,13 +2349,13 @@ def CheckFileHeaderDoxygenComments(FullFileName):
if (len(CommentStrListTemp) <= 1):
# For Mac
CommentStrListTemp = CommentStr.split('\r')
# Skip the content before the file header
# Skip the content before the file header
for CommentLine in CommentStrListTemp:
if CommentLine.strip().startswith('/** @file'):
FileStartFlag = True
if FileStartFlag == True:
CommentStrList.append(CommentLine)
ID = Result[1]
Index = 0
if CommentStrList and CommentStrList[0].strip().startswith('/** @file'):
@@ -2378,7 +2378,7 @@ def CheckFileHeaderDoxygenComments(FullFileName):
if EccGlobalData.gConfig.HeaderCheckCFileCommentStartSpacesNum == '1' or EccGlobalData.gConfig.HeaderCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
if CommentLine.startswith('/** @file') == False and CommentLine.startswith('**/') == False and CommentLine.strip() and CommentLine.startswith(' ') == False:
PrintErrorMsg(ERROR_HEADER_CHECK_FILE, 'File header comment content should start with two spaces at each line', FileTable, ID)
CommentLine = CommentLine.strip()
if CommentLine.startswith('Copyright'):
NoCopyrightFlag = False
@@ -2403,9 +2403,9 @@ def CheckFileHeaderDoxygenComments(FullFileName):
# Check whether C File header Comment's each reference at list should begin with a bullet character.
if EccGlobalData.gConfig.HeaderCheckCFileCommentReferenceFormat == '1' or EccGlobalData.gConfig.HeaderCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
if RefListFlag == True:
if RefLine.strip() and RefLine.strip().startswith('**/') == False and RefLine.startswith(' -') == False:
PrintErrorMsg(ERROR_HEADER_CHECK_FILE, 'Each reference on a separate line should begin with a bullet character ""-"" ', FileTable, ID)
if RefLine.strip() and RefLine.strip().startswith('**/') == False and RefLine.startswith(' -') == False:
PrintErrorMsg(ERROR_HEADER_CHECK_FILE, 'Each reference on a separate line should begin with a bullet character ""-"" ', FileTable, ID)
if NoHeaderCommentStartFlag:
PrintErrorMsg(ERROR_DOXYGEN_CHECK_FILE_HEADER, 'File header comment should begin with ""/** @file""', FileTable, ID)
return