2014-01-27 06:23:15 +01:00
## @file
# Common routines used by all tools
#
2017-02-07 09:05:13 +01:00
# Copyright (c) 2007 - 2017, Intel Corporation. All rights reserved.<BR>
2014-01-27 06:23:15 +01:00
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
2014-08-15 05:06:48 +02:00
import Common . LongFilePathOs as os
2014-01-27 06:23:15 +01:00
import sys
import string
import thread
import threading
import time
import re
import cPickle
import array
2014-08-28 15:53:34 +02:00
import shutil
2015-06-23 08:46:01 +02:00
from struct import pack
2014-01-27 06:23:15 +01:00
from UserDict import IterableUserDict
from UserList import UserList
from Common import EdkLogger as EdkLogger
from Common import GlobalData as GlobalData
from DataType import *
from BuildToolError import *
from CommonDataClass . DataClass import *
from Parsing import GetSplitValueList
2014-08-15 05:06:48 +02:00
from Common . LongFilePathSupport import OpenLongFilePath as open
2015-10-08 11:27:14 +02:00
from Common . MultipleWorkspace import MultipleWorkspace as mws
2014-01-27 06:23:15 +01:00
## Regular expression used to find out place holders in string template
2015-12-01 05:22:16 +01:00
gPlaceholderPattern = re . compile ( " \ $ \ { ([^$() \ s]+) \ } " , re . MULTILINE | re . UNICODE )
2014-01-27 06:23:15 +01:00
## Dictionary used to store file time stamp for quick re-access
gFileTimeStampCache = { } # {file path : file time stamp}
## Dictionary used to store dependencies of files
gDependencyDatabase = { } # arch : {file path : [dependent files list]}
2015-02-06 04:40:27 +01:00
def GetVariableOffset ( mapfilepath , efifilepath , varnames ) :
""" Parse map file to get variable offset in current EFI file
@param mapfilepath Map file absolution path
@param efifilepath : EFI binary file full path
@param varnames iteratable container whose elements are variable names to be searched
@return List whos elements are tuple with variable name and raw offset
"""
lines = [ ]
try :
f = open ( mapfilepath , ' r ' )
lines = f . readlines ( )
f . close ( )
except :
return None
if len ( lines ) == 0 : return None
firstline = lines [ 0 ] . strip ( )
if ( firstline . startswith ( " Archive member included " ) and
firstline . endswith ( " file (symbol) " ) ) :
return _parseForGCC ( lines , efifilepath , varnames )
2017-11-02 06:15:34 +01:00
if firstline . startswith ( " # Path: " ) :
return _parseForXcode ( lines , efifilepath , varnames )
2015-02-06 04:40:27 +01:00
return _parseGeneral ( lines , efifilepath , varnames )
2017-11-02 06:15:34 +01:00
def _parseForXcode ( lines , efifilepath , varnames ) :
status = 0
ret = [ ]
for index , line in enumerate ( lines ) :
line = line . strip ( )
if status == 0 and line == " # Symbols: " :
status = 1
continue
if status == 1 and len ( line ) != 0 :
for varname in varnames :
if varname in line :
m = re . match ( ' ^([ \ da-fA-FxX]+)([ \ s \ S]*)([_]* %s )$ ' % varname , line )
if m != None :
ret . append ( ( varname , m . group ( 1 ) ) )
return ret
2015-02-06 04:40:27 +01:00
def _parseForGCC ( lines , efifilepath , varnames ) :
""" Parse map file generated by GCC linker """
status = 0
sections = [ ]
varoffset = [ ]
2016-11-30 09:02:21 +01:00
for index , line in enumerate ( lines ) :
2015-02-06 04:40:27 +01:00
line = line . strip ( )
# status machine transection
if status == 0 and line == " Memory Configuration " :
status = 1
continue
elif status == 1 and line == ' Linker script and memory map ' :
status = 2
continue
elif status == 2 and line == ' START GROUP ' :
status = 3
continue
# status handler
2016-11-30 09:02:21 +01:00
if status == 3 :
2015-02-06 04:40:27 +01:00
m = re . match ( ' ^([ \ w_ \ .]+) +([ \ da-fA-Fx]+) +([ \ da-fA-Fx]+)$ ' , line )
if m != None :
sections . append ( m . groups ( 0 ) )
for varname in varnames :
2017-02-07 09:05:13 +01:00
Str = ' '
m = re . match ( " ^.data.( %s ) " % varname , line )
2015-02-06 04:40:27 +01:00
if m != None :
2017-02-07 09:05:13 +01:00
m = re . match ( " .data.( %s )$ " % varname , line )
if m != None :
Str = lines [ index + 1 ]
else :
Str = line [ len ( " .data. %s " % varname ) : ]
if Str :
m = re . match ( ' ^([ \ da-fA-Fx]+) +([ \ da-fA-Fx]+) ' , Str . strip ( ) )
2016-11-30 09:02:21 +01:00
if m != None :
varoffset . append ( ( varname , int ( m . groups ( 0 ) [ 0 ] , 16 ) , int ( sections [ - 1 ] [ 1 ] , 16 ) , sections [ - 1 ] [ 0 ] ) )
2015-02-06 04:40:27 +01:00
if not varoffset :
return [ ]
# get section information from efi file
efisecs = PeImageClass ( efifilepath ) . SectionHeaderList
if efisecs == None or len ( efisecs ) == 0 :
return [ ]
#redirection
redirection = 0
for efisec in efisecs :
for section in sections :
if section [ 0 ] . strip ( ) == efisec [ 0 ] . strip ( ) and section [ 0 ] . strip ( ) == ' .text ' :
redirection = int ( section [ 1 ] , 16 ) - efisec [ 1 ]
ret = [ ]
for var in varoffset :
for efisec in efisecs :
if var [ 1 ] > = efisec [ 1 ] and var [ 1 ] < efisec [ 1 ] + efisec [ 3 ] :
ret . append ( ( var [ 0 ] , hex ( efisec [ 2 ] + var [ 1 ] - efisec [ 1 ] - redirection ) ) )
return ret
def _parseGeneral ( lines , efifilepath , varnames ) :
status = 0 #0 - beginning of file; 1 - PE section definition; 2 - symbol table
secs = [ ] # key = section name
varoffset = [ ]
secRe = re . compile ( ' ^([ \ da-fA-F]+):([ \ da-fA-F]+) +([ \ da-fA-F]+)[Hh]? +([. \ w \ $]+) +( \ w+) ' , re . UNICODE )
symRe = re . compile ( ' ^([ \ da-fA-F]+):([ \ da-fA-F]+) +([ \ .: \\ \\ \ w \ ?@ \ $]+) +([ \ da-fA-F]+) ' , re . UNICODE )
for line in lines :
line = line . strip ( )
if re . match ( " ^Start[ ' ' ]+Length[ ' ' ]+Name[ ' ' ]+Class " , line ) :
status = 1
continue
if re . match ( " ^Address[ ' ' ]+Publics by Value[ ' ' ]+Rva \ +Base " , line ) :
status = 2
continue
if re . match ( " ^entry point at " , line ) :
status = 3
continue
if status == 1 and len ( line ) != 0 :
m = secRe . match ( line )
assert m != None , " Fail to parse the section in map file , line is %s " % line
sec_no , sec_start , sec_length , sec_name , sec_class = m . groups ( 0 )
secs . append ( [ int ( sec_no , 16 ) , int ( sec_start , 16 ) , int ( sec_length , 16 ) , sec_name , sec_class ] )
if status == 2 and len ( line ) != 0 :
for varname in varnames :
m = symRe . match ( line )
assert m != None , " Fail to parse the symbol in map file, line is %s " % line
sec_no , sym_offset , sym_name , vir_addr = m . groups ( 0 )
sec_no = int ( sec_no , 16 )
sym_offset = int ( sym_offset , 16 )
vir_addr = int ( vir_addr , 16 )
m2 = re . match ( ' ^[_]*( %s ) ' % varname , sym_name )
if m2 != None :
# fond a binary pcd entry in map file
for sec in secs :
if sec [ 0 ] == sec_no and ( sym_offset > = sec [ 1 ] and sym_offset < sec [ 1 ] + sec [ 2 ] ) :
varoffset . append ( [ varname , sec [ 3 ] , sym_offset , vir_addr , sec_no ] )
if not varoffset : return [ ]
# get section information from efi file
efisecs = PeImageClass ( efifilepath ) . SectionHeaderList
if efisecs == None or len ( efisecs ) == 0 :
return [ ]
ret = [ ]
for var in varoffset :
index = 0
for efisec in efisecs :
index = index + 1
if var [ 1 ] . strip ( ) == efisec [ 0 ] . strip ( ) :
ret . append ( ( var [ 0 ] , hex ( efisec [ 2 ] + var [ 2 ] ) ) )
elif var [ 4 ] == index :
ret . append ( ( var [ 0 ] , hex ( efisec [ 2 ] + var [ 2 ] ) ) )
return ret
2014-08-28 15:53:34 +02:00
## Routine to process duplicated INF
#
# This function is called by following two cases:
# Case 1 in DSC:
# [components.arch]
# Pkg/module/module.inf
# Pkg/module/module.inf {
# <Defines>
# FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836
# }
# Case 2 in FDF:
# INF Pkg/module/module.inf
# INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf
#
# This function copies Pkg/module/module.inf to
# Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf
#
# @param Path Original PathClass object
# @param BaseName New file base name
#
# @retval return the new PathClass object
#
def ProcessDuplicatedInf ( Path , BaseName , Workspace ) :
Filename = os . path . split ( Path . File ) [ 1 ]
if ' . ' in Filename :
Filename = BaseName + Path . BaseName + Filename [ Filename . rfind ( ' . ' ) : ]
else :
Filename = BaseName + Path . BaseName
#
# If -N is specified on command line, cache is disabled
# The directory has to be created
#
DbDir = os . path . split ( GlobalData . gDatabasePath ) [ 0 ]
if not os . path . exists ( DbDir ) :
os . makedirs ( DbDir )
#
# A temporary INF is copied to database path which must have write permission
# The temporary will be removed at the end of build
# In case of name conflict, the file name is
# FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)
#
TempFullPath = os . path . join ( DbDir ,
Filename )
RtPath = PathClass ( Path . File , Workspace )
#
# Modify the full path to temporary path, keep other unchanged
#
# To build same module more than once, the module path with FILE_GUID overridden has
# the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
# in DSC which is used as relative path by C files and other files in INF.
# A trick was used: all module paths are PathClass instances, after the initialization
# of PathClass, the PathClass.Path is overridden by the temporary INF path.
#
# The reason for creating a temporary INF is:
# Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,
# the key is the full path of INF, the value is an object to save overridden library instances, PCDs.
# A different key for the same module is needed to create different output directory,
# retrieve overridden PCDs, library instances.
#
# The BaseName is the FILE_GUID which is also the output directory name.
#
#
RtPath . Path = TempFullPath
RtPath . BaseName = BaseName
#
# If file exists, compare contents
#
if os . path . exists ( TempFullPath ) :
with open ( str ( Path ) , ' rb ' ) as f1 : Src = f1 . read ( )
with open ( TempFullPath , ' rb ' ) as f2 : Dst = f2 . read ( )
if Src == Dst :
return RtPath
GlobalData . gTempInfs . append ( TempFullPath )
shutil . copy2 ( str ( Path ) , TempFullPath )
return RtPath
## Remove temporary created INFs whose paths were saved in gTempInfs
#
def ClearDuplicatedInf ( ) :
for File in GlobalData . gTempInfs :
if os . path . exists ( File ) :
os . remove ( File )
2014-01-27 06:23:15 +01:00
## callback routine for processing variable option
#
# This function can be used to process variable number of option values. The
# typical usage of it is specify architecure list on command line.
# (e.g. <tool> -a IA32 X64 IPF)
#
# @param Option Standard callback function parameter
# @param OptionString Standard callback function parameter
# @param Value Standard callback function parameter
# @param Parser Standard callback function parameter
#
# @retval
#
def ProcessVariableArgument ( Option , OptionString , Value , Parser ) :
assert Value is None
Value = [ ]
RawArgs = Parser . rargs
while RawArgs :
Arg = RawArgs [ 0 ]
if ( Arg [ : 2 ] == " -- " and len ( Arg ) > 2 ) or \
( Arg [ : 1 ] == " - " and len ( Arg ) > 1 and Arg [ 1 ] != " - " ) :
break
Value . append ( Arg )
del RawArgs [ 0 ]
setattr ( Parser . values , Option . dest , Value )
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
#
# @param Guid The GUID string
#
# @retval string The GUID string in C structure style
#
def GuidStringToGuidStructureString ( Guid ) :
GuidList = Guid . split ( ' - ' )
Result = ' { '
2015-12-01 05:22:16 +01:00
for Index in range ( 0 , 3 , 1 ) :
2014-01-27 06:23:15 +01:00
Result = Result + ' 0x ' + GuidList [ Index ] + ' , '
Result = Result + ' { 0x ' + GuidList [ 3 ] [ 0 : 2 ] + ' , 0x ' + GuidList [ 3 ] [ 2 : 4 ]
2015-12-01 05:22:16 +01:00
for Index in range ( 0 , 12 , 2 ) :
Result = Result + ' , 0x ' + GuidList [ 4 ] [ Index : Index + 2 ]
2014-01-27 06:23:15 +01:00
Result + = ' }} '
return Result
## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in byte array
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureByteArrayToGuidString ( GuidValue ) :
guidValueString = GuidValue . lower ( ) . replace ( " { " , " " ) . replace ( " } " , " " ) . replace ( " " , " " ) . replace ( " ; " , " " )
guidValueList = guidValueString . split ( " , " )
if len ( guidValueList ) != 16 :
return ' '
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try :
return " %02x %02x %02x %02x - %02x %02x - %02x %02x - %02x %02x - %02x %02x %02x %02x %02x %02x " % (
int ( guidValueList [ 3 ] , 16 ) ,
int ( guidValueList [ 2 ] , 16 ) ,
int ( guidValueList [ 1 ] , 16 ) ,
int ( guidValueList [ 0 ] , 16 ) ,
int ( guidValueList [ 5 ] , 16 ) ,
int ( guidValueList [ 4 ] , 16 ) ,
int ( guidValueList [ 7 ] , 16 ) ,
int ( guidValueList [ 6 ] , 16 ) ,
int ( guidValueList [ 8 ] , 16 ) ,
int ( guidValueList [ 9 ] , 16 ) ,
int ( guidValueList [ 10 ] , 16 ) ,
int ( guidValueList [ 11 ] , 16 ) ,
int ( guidValueList [ 12 ] , 16 ) ,
int ( guidValueList [ 13 ] , 16 ) ,
int ( guidValueList [ 14 ] , 16 ) ,
int ( guidValueList [ 15 ] , 16 )
)
except :
return ' '
## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureStringToGuidString ( GuidValue ) :
guidValueString = GuidValue . lower ( ) . replace ( " { " , " " ) . replace ( " } " , " " ) . replace ( " " , " " ) . replace ( " ; " , " " )
guidValueList = guidValueString . split ( " , " )
if len ( guidValueList ) != 11 :
return ' '
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try :
return " %08x - %04x - %04x - %02x %02x - %02x %02x %02x %02x %02x %02x " % (
int ( guidValueList [ 0 ] , 16 ) ,
int ( guidValueList [ 1 ] , 16 ) ,
int ( guidValueList [ 2 ] , 16 ) ,
int ( guidValueList [ 3 ] , 16 ) ,
int ( guidValueList [ 4 ] , 16 ) ,
int ( guidValueList [ 5 ] , 16 ) ,
int ( guidValueList [ 6 ] , 16 ) ,
int ( guidValueList [ 7 ] , 16 ) ,
int ( guidValueList [ 8 ] , 16 ) ,
int ( guidValueList [ 9 ] , 16 ) ,
int ( guidValueList [ 10 ] , 16 )
)
except :
return ' '
## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
#
def GuidStructureStringToGuidValueName ( GuidValue ) :
guidValueString = GuidValue . lower ( ) . replace ( " { " , " " ) . replace ( " } " , " " ) . replace ( " " , " " )
guidValueList = guidValueString . split ( " , " )
if len ( guidValueList ) != 11 :
EdkLogger . error ( None , FORMAT_INVALID , " Invalid GUID value string [ %s ] " % GuidValue )
return " %08x _ %04x _ %04x _ %02x %02x _ %02x %02x %02x %02x %02x %02x " % (
int ( guidValueList [ 0 ] , 16 ) ,
int ( guidValueList [ 1 ] , 16 ) ,
int ( guidValueList [ 2 ] , 16 ) ,
int ( guidValueList [ 3 ] , 16 ) ,
int ( guidValueList [ 4 ] , 16 ) ,
int ( guidValueList [ 5 ] , 16 ) ,
int ( guidValueList [ 6 ] , 16 ) ,
int ( guidValueList [ 7 ] , 16 ) ,
int ( guidValueList [ 8 ] , 16 ) ,
int ( guidValueList [ 9 ] , 16 ) ,
int ( guidValueList [ 10 ] , 16 )
)
## Create directories
#
# @param Directory The directory name
#
def CreateDirectory ( Directory ) :
if Directory == None or Directory . strip ( ) == " " :
return True
try :
if not os . access ( Directory , os . F_OK ) :
os . makedirs ( Directory )
except :
return False
return True
## Remove directories, including files and sub-directories in it
#
# @param Directory The directory name
#
def RemoveDirectory ( Directory , Recursively = False ) :
if Directory == None or Directory . strip ( ) == " " or not os . path . exists ( Directory ) :
return
if Recursively :
CurrentDirectory = os . getcwd ( )
os . chdir ( Directory )
for File in os . listdir ( " . " ) :
if os . path . isdir ( File ) :
RemoveDirectory ( File , Recursively )
else :
os . remove ( File )
os . chdir ( CurrentDirectory )
os . rmdir ( Directory )
## Check if given file is changed or not
#
# This method is used to check if a file is changed or not between two build
# actions. It makes use a cache to store files timestamp.
#
# @param File The path of file
#
# @retval True If the given file is changed, doesn't exist, or can't be
# found in timestamp cache
# @retval False If the given file is changed
#
def IsChanged ( File ) :
if not os . path . exists ( File ) :
return True
FileState = os . stat ( File )
TimeStamp = FileState [ - 2 ]
if File in gFileTimeStampCache and TimeStamp == gFileTimeStampCache [ File ] :
FileChanged = False
else :
FileChanged = True
gFileTimeStampCache [ File ] = TimeStamp
return FileChanged
## Store content in file
#
# This method is used to save file only when its content is changed. This is
# quite useful for "make" system to decide what will be re-built and what won't.
#
# @param File The path of file
# @param Content The new content of the file
# @param IsBinaryFile The flag indicating if the file is binary file or not
#
# @retval True If the file content is changed and the file is renewed
# @retval False If the file content is the same
#
def SaveFileOnChange ( File , Content , IsBinaryFile = True ) :
if not IsBinaryFile :
Content = Content . replace ( " \n " , os . linesep )
if os . path . exists ( File ) :
try :
if Content == open ( File , " rb " ) . read ( ) :
return False
except :
EdkLogger . error ( None , FILE_OPEN_FAILURE , ExtraData = File )
DirName = os . path . dirname ( File )
if not CreateDirectory ( DirName ) :
EdkLogger . error ( None , FILE_CREATE_FAILURE , " Could not create directory %s " % DirName )
else :
if DirName == ' ' :
DirName = os . getcwd ( )
if not os . access ( DirName , os . W_OK ) :
EdkLogger . error ( None , PERMISSION_FAILURE , " Do not have write permission on directory %s " % DirName )
try :
if GlobalData . gIsWindows :
try :
from PyUtility import SaveFileToDisk
if not SaveFileToDisk ( File , Content ) :
EdkLogger . error ( None , FILE_CREATE_FAILURE , ExtraData = File )
except :
Fd = open ( File , " wb " )
Fd . write ( Content )
Fd . close ( )
else :
Fd = open ( File , " wb " )
Fd . write ( Content )
Fd . close ( )
except IOError , X :
2015-12-01 05:22:16 +01:00
EdkLogger . error ( None , FILE_CREATE_FAILURE , ExtraData = ' IOError %s ' % X )
2014-01-27 06:23:15 +01:00
return True
## Make a Python object persistent on file system
#
# @param Data The object to be stored in file
# @param File The path of file to store the object
#
def DataDump ( Data , File ) :
Fd = None
try :
Fd = open ( File , ' wb ' )
cPickle . dump ( Data , Fd , cPickle . HIGHEST_PROTOCOL )
except :
EdkLogger . error ( " " , FILE_OPEN_FAILURE , ExtraData = File , RaiseError = False )
finally :
if Fd != None :
Fd . close ( )
## Restore a Python object from a file
#
# @param File The path of file stored the object
#
# @retval object A python object
# @retval None If failure in file operation
#
def DataRestore ( File ) :
Data = None
Fd = None
try :
Fd = open ( File , ' rb ' )
Data = cPickle . load ( Fd )
except Exception , e :
EdkLogger . verbose ( " Failed to load [ %s ] \n \t %s " % ( File , str ( e ) ) )
Data = None
finally :
if Fd != None :
Fd . close ( )
return Data
## Retrieve and cache the real path name in file system
#
# @param Root The root directory of path relative to
#
# @retval str The path string if the path exists
# @retval None If path doesn't exist
#
class DirCache :
_CACHE_ = set ( )
_UPPER_CACHE_ = { }
def __init__ ( self , Root ) :
self . _Root = Root
for F in os . listdir ( Root ) :
self . _CACHE_ . add ( F )
self . _UPPER_CACHE_ [ F . upper ( ) ] = F
# =[] operator
def __getitem__ ( self , Path ) :
Path = Path [ len ( os . path . commonprefix ( [ Path , self . _Root ] ) ) : ]
if not Path :
return self . _Root
if Path and Path [ 0 ] == os . path . sep :
Path = Path [ 1 : ]
if Path in self . _CACHE_ :
return os . path . join ( self . _Root , Path )
UpperPath = Path . upper ( )
if UpperPath in self . _UPPER_CACHE_ :
return os . path . join ( self . _Root , self . _UPPER_CACHE_ [ UpperPath ] )
IndexList = [ ]
LastSepIndex = - 1
SepIndex = Path . find ( os . path . sep )
while SepIndex > - 1 :
Parent = UpperPath [ : SepIndex ]
if Parent not in self . _UPPER_CACHE_ :
break
LastSepIndex = SepIndex
SepIndex = Path . find ( os . path . sep , LastSepIndex + 1 )
if LastSepIndex == - 1 :
return None
Cwd = os . getcwd ( )
os . chdir ( self . _Root )
SepIndex = LastSepIndex
while SepIndex > - 1 :
Parent = Path [ : SepIndex ]
ParentKey = UpperPath [ : SepIndex ]
if ParentKey not in self . _UPPER_CACHE_ :
os . chdir ( Cwd )
return None
if Parent in self . _CACHE_ :
ParentDir = Parent
else :
ParentDir = self . _UPPER_CACHE_ [ ParentKey ]
for F in os . listdir ( ParentDir ) :
Dir = os . path . join ( ParentDir , F )
self . _CACHE_ . add ( Dir )
self . _UPPER_CACHE_ [ Dir . upper ( ) ] = Dir
SepIndex = Path . find ( os . path . sep , SepIndex + 1 )
os . chdir ( Cwd )
if Path in self . _CACHE_ :
return os . path . join ( self . _Root , Path )
elif UpperPath in self . _UPPER_CACHE_ :
return os . path . join ( self . _Root , self . _UPPER_CACHE_ [ UpperPath ] )
return None
## Get all files of a directory
#
# @param Root: Root dir
# @param SkipList : The files need be skipped
#
# @retval A list of all files
#
2015-12-01 05:22:16 +01:00
def GetFiles ( Root , SkipList = None , FullPath = True ) :
2014-01-27 06:23:15 +01:00
OriPath = Root
FileList = [ ]
for Root , Dirs , Files in os . walk ( Root ) :
if SkipList :
for Item in SkipList :
if Item in Dirs :
Dirs . remove ( Item )
for File in Files :
File = os . path . normpath ( os . path . join ( Root , File ) )
if not FullPath :
File = File [ len ( OriPath ) + 1 : ]
FileList . append ( File )
return FileList
## Check if gvien file exists or not
#
# @param File File name or path to be checked
# @param Dir The directory the file is relative to
#
# @retval True if file exists
# @retval False if file doesn't exists
#
def ValidFile ( File , Ext = None ) :
if Ext != None :
Dummy , FileExt = os . path . splitext ( File )
if FileExt . lower ( ) != Ext . lower ( ) :
return False
if not os . path . exists ( File ) :
return False
return True
def RealPath ( File , Dir = ' ' , OverrideDir = ' ' ) :
NewFile = os . path . normpath ( os . path . join ( Dir , File ) )
NewFile = GlobalData . gAllFiles [ NewFile ]
if not NewFile and OverrideDir :
NewFile = os . path . normpath ( os . path . join ( OverrideDir , File ) )
NewFile = GlobalData . gAllFiles [ NewFile ]
return NewFile
def RealPath2 ( File , Dir = ' ' , OverrideDir = ' ' ) :
2014-09-16 10:33:40 +02:00
NewFile = None
2014-01-27 06:23:15 +01:00
if OverrideDir :
NewFile = GlobalData . gAllFiles [ os . path . normpath ( os . path . join ( OverrideDir , File ) ) ]
if NewFile :
if OverrideDir [ - 1 ] == os . path . sep :
return NewFile [ len ( OverrideDir ) : ] , NewFile [ 0 : len ( OverrideDir ) ]
else :
2015-12-01 05:22:16 +01:00
return NewFile [ len ( OverrideDir ) + 1 : ] , NewFile [ 0 : len ( OverrideDir ) ]
2014-01-27 06:23:15 +01:00
if GlobalData . gAllFiles :
NewFile = GlobalData . gAllFiles [ os . path . normpath ( os . path . join ( Dir , File ) ) ]
2014-09-16 10:33:40 +02:00
if not NewFile :
2014-01-27 06:23:15 +01:00
NewFile = os . path . normpath ( os . path . join ( Dir , File ) )
2014-09-16 10:33:40 +02:00
if not os . path . exists ( NewFile ) :
return None , None
2014-01-27 06:23:15 +01:00
if NewFile :
if Dir :
if Dir [ - 1 ] == os . path . sep :
return NewFile [ len ( Dir ) : ] , NewFile [ 0 : len ( Dir ) ]
else :
2015-12-01 05:22:16 +01:00
return NewFile [ len ( Dir ) + 1 : ] , NewFile [ 0 : len ( Dir ) ]
2014-01-27 06:23:15 +01:00
else :
return NewFile , ' '
return None , None
## Check if gvien file exists or not
#
#
def ValidFile2 ( AllFiles , File , Ext = None , Workspace = ' ' , EfiSource = ' ' , EdkSource = ' ' , Dir = ' . ' , OverrideDir = ' ' ) :
NewFile = File
if Ext != None :
Dummy , FileExt = os . path . splitext ( File )
if FileExt . lower ( ) != Ext . lower ( ) :
return False , File
# Replace the Edk macros
if OverrideDir != ' ' and OverrideDir != None :
if OverrideDir . find ( ' $(EFI_SOURCE) ' ) > - 1 :
OverrideDir = OverrideDir . replace ( ' $(EFI_SOURCE) ' , EfiSource )
if OverrideDir . find ( ' $(EDK_SOURCE) ' ) > - 1 :
OverrideDir = OverrideDir . replace ( ' $(EDK_SOURCE) ' , EdkSource )
# Replace the default dir to current dir
if Dir == ' . ' :
Dir = os . getcwd ( )
2015-12-01 05:22:16 +01:00
Dir = Dir [ len ( Workspace ) + 1 : ]
2014-01-27 06:23:15 +01:00
# First check if File has Edk definition itself
if File . find ( ' $(EFI_SOURCE) ' ) > - 1 or File . find ( ' $(EDK_SOURCE) ' ) > - 1 :
NewFile = File . replace ( ' $(EFI_SOURCE) ' , EfiSource )
NewFile = NewFile . replace ( ' $(EDK_SOURCE) ' , EdkSource )
NewFile = AllFiles [ os . path . normpath ( NewFile ) ]
if NewFile != None :
return True , NewFile
# Second check the path with override value
if OverrideDir != ' ' and OverrideDir != None :
NewFile = AllFiles [ os . path . normpath ( os . path . join ( OverrideDir , File ) ) ]
if NewFile != None :
return True , NewFile
# Last check the path with normal definitions
File = os . path . join ( Dir , File )
NewFile = AllFiles [ os . path . normpath ( File ) ]
if NewFile != None :
return True , NewFile
return False , File
## Check if gvien file exists or not
#
#
def ValidFile3 ( AllFiles , File , Workspace = ' ' , EfiSource = ' ' , EdkSource = ' ' , Dir = ' . ' , OverrideDir = ' ' ) :
# Replace the Edk macros
if OverrideDir != ' ' and OverrideDir != None :
if OverrideDir . find ( ' $(EFI_SOURCE) ' ) > - 1 :
OverrideDir = OverrideDir . replace ( ' $(EFI_SOURCE) ' , EfiSource )
if OverrideDir . find ( ' $(EDK_SOURCE) ' ) > - 1 :
OverrideDir = OverrideDir . replace ( ' $(EDK_SOURCE) ' , EdkSource )
# Replace the default dir to current dir
# Dir is current module dir related to workspace
if Dir == ' . ' :
Dir = os . getcwd ( )
2015-12-01 05:22:16 +01:00
Dir = Dir [ len ( Workspace ) + 1 : ]
2014-01-27 06:23:15 +01:00
NewFile = File
RelaPath = AllFiles [ os . path . normpath ( Dir ) ]
NewRelaPath = RelaPath
while ( True ) :
# First check if File has Edk definition itself
if File . find ( ' $(EFI_SOURCE) ' ) > - 1 or File . find ( ' $(EDK_SOURCE) ' ) > - 1 :
File = File . replace ( ' $(EFI_SOURCE) ' , EfiSource )
File = File . replace ( ' $(EDK_SOURCE) ' , EdkSource )
NewFile = AllFiles [ os . path . normpath ( File ) ]
if NewFile != None :
NewRelaPath = os . path . dirname ( NewFile )
File = os . path . basename ( NewFile )
#NewRelaPath = NewFile[:len(NewFile) - len(File.replace("..\\", '').replace("../", '')) - 1]
break
# Second check the path with override value
if OverrideDir != ' ' and OverrideDir != None :
NewFile = AllFiles [ os . path . normpath ( os . path . join ( OverrideDir , File ) ) ]
if NewFile != None :
#NewRelaPath = os.path.dirname(NewFile)
NewRelaPath = NewFile [ : len ( NewFile ) - len ( File . replace ( " .. \\ " , ' ' ) . replace ( " ../ " , ' ' ) ) - 1 ]
break
# Last check the path with normal definitions
NewFile = AllFiles [ os . path . normpath ( os . path . join ( Dir , File ) ) ]
if NewFile != None :
break
# No file found
break
return NewRelaPath , RelaPath , File
def GetRelPath ( Path1 , Path2 ) :
FileName = os . path . basename ( Path2 )
L1 = os . path . normpath ( Path1 ) . split ( os . path . normpath ( ' / ' ) )
L2 = os . path . normpath ( Path2 ) . split ( os . path . normpath ( ' / ' ) )
for Index in range ( 0 , len ( L1 ) ) :
if L1 [ Index ] != L2 [ Index ] :
FileName = ' ../ ' * ( len ( L1 ) - Index )
for Index2 in range ( Index , len ( L2 ) ) :
FileName = os . path . join ( FileName , L2 [ Index2 ] )
break
return os . path . normpath ( FileName )
## Get GUID value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
2016-05-10 11:58:26 +02:00
# @param Inffile The driver file
2014-01-27 06:23:15 +01:00
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
2016-05-10 11:58:26 +02:00
def GuidValue ( CName , PackageList , Inffile = None ) :
2014-01-27 06:23:15 +01:00
for P in PackageList :
2016-05-10 11:58:26 +02:00
GuidKeys = P . Guids . keys ( )
if Inffile and P . _PrivateGuids :
if not Inffile . startswith ( P . MetaFile . Dir ) :
GuidKeys = ( dict . fromkeys ( x for x in P . Guids if x not in P . _PrivateGuids ) ) . keys ( )
if CName in GuidKeys :
2014-01-27 06:23:15 +01:00
return P . Guids [ CName ]
return None
## Get Protocol value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
2016-05-10 11:58:26 +02:00
# @param Inffile The driver file
2014-01-27 06:23:15 +01:00
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
2016-05-10 11:58:26 +02:00
def ProtocolValue ( CName , PackageList , Inffile = None ) :
2014-01-27 06:23:15 +01:00
for P in PackageList :
2016-05-10 11:58:26 +02:00
ProtocolKeys = P . Protocols . keys ( )
if Inffile and P . _PrivateProtocols :
if not Inffile . startswith ( P . MetaFile . Dir ) :
ProtocolKeys = ( dict . fromkeys ( x for x in P . Protocols if x not in P . _PrivateProtocols ) ) . keys ( )
if CName in ProtocolKeys :
2014-01-27 06:23:15 +01:00
return P . Protocols [ CName ]
return None
## Get PPI value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
2016-05-10 11:58:26 +02:00
# @param Inffile The driver file
2014-01-27 06:23:15 +01:00
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
2016-05-10 11:58:26 +02:00
def PpiValue ( CName , PackageList , Inffile = None ) :
2014-01-27 06:23:15 +01:00
for P in PackageList :
2016-05-10 11:58:26 +02:00
PpiKeys = P . Ppis . keys ( )
if Inffile and P . _PrivatePpis :
if not Inffile . startswith ( P . MetaFile . Dir ) :
PpiKeys = ( dict . fromkeys ( x for x in P . Ppis if x not in P . _PrivatePpis ) ) . keys ( )
if CName in PpiKeys :
2014-01-27 06:23:15 +01:00
return P . Ppis [ CName ]
return None
## A string template class
#
# This class implements a template for string replacement. A string template
# looks like following
#
# ${BEGIN} other_string ${placeholder_name} other_string ${END}
#
# The string between ${BEGIN} and ${END} will be repeated as many times as the
# length of "placeholder_name", which is a list passed through a dict. The
# "placeholder_name" is the key name of the dict. The ${BEGIN} and ${END} can
# be not used and, in this case, the "placeholder_name" must not a list and it
# will just be replaced once.
#
class TemplateString ( object ) :
_REPEAT_START_FLAG = " BEGIN "
_REPEAT_END_FLAG = " END "
class Section ( object ) :
_LIST_TYPES = [ type ( [ ] ) , type ( set ( ) ) , type ( ( 0 , ) ) ]
def __init__ ( self , TemplateSection , PlaceHolderList ) :
self . _Template = TemplateSection
self . _PlaceHolderList = [ ]
# Split the section into sub-sections according to the position of placeholders
if PlaceHolderList :
self . _SubSectionList = [ ]
SubSectionStart = 0
#
# The placeholders passed in must be in the format of
#
# PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
#
2015-12-01 05:22:16 +01:00
for PlaceHolder , Start , End in PlaceHolderList :
2014-01-27 06:23:15 +01:00
self . _SubSectionList . append ( TemplateSection [ SubSectionStart : Start ] )
self . _SubSectionList . append ( TemplateSection [ Start : End ] )
self . _PlaceHolderList . append ( PlaceHolder )
SubSectionStart = End
if SubSectionStart < len ( TemplateSection ) :
self . _SubSectionList . append ( TemplateSection [ SubSectionStart : ] )
else :
self . _SubSectionList = [ TemplateSection ]
def __str__ ( self ) :
return self . _Template + " : " + str ( self . _PlaceHolderList )
def Instantiate ( self , PlaceHolderValues ) :
RepeatTime = - 1
RepeatPlaceHolders = { }
NonRepeatPlaceHolders = { }
for PlaceHolder in self . _PlaceHolderList :
if PlaceHolder not in PlaceHolderValues :
continue
Value = PlaceHolderValues [ PlaceHolder ]
if type ( Value ) in self . _LIST_TYPES :
if RepeatTime < 0 :
RepeatTime = len ( Value )
elif RepeatTime != len ( Value ) :
EdkLogger . error (
" TemplateString " ,
PARAMETER_INVALID ,
" $ { %s } has different repeat time from others! " % PlaceHolder ,
ExtraData = str ( self . _Template )
)
RepeatPlaceHolders [ " $ { %s } " % PlaceHolder ] = Value
else :
NonRepeatPlaceHolders [ " $ { %s } " % PlaceHolder ] = Value
if NonRepeatPlaceHolders :
StringList = [ ]
for S in self . _SubSectionList :
if S not in NonRepeatPlaceHolders :
StringList . append ( S )
else :
StringList . append ( str ( NonRepeatPlaceHolders [ S ] ) )
else :
StringList = self . _SubSectionList
if RepeatPlaceHolders :
TempStringList = [ ]
for Index in range ( RepeatTime ) :
for S in StringList :
if S not in RepeatPlaceHolders :
TempStringList . append ( S )
else :
TempStringList . append ( str ( RepeatPlaceHolders [ S ] [ Index ] ) )
StringList = TempStringList
return " " . join ( StringList )
## Constructor
def __init__ ( self , Template = None ) :
self . String = ' '
self . IsBinary = False
self . _Template = Template
self . _TemplateSectionList = self . _Parse ( Template )
## str() operator
#
# @retval string The string replaced
#
def __str__ ( self ) :
return self . String
## Split the template string into fragments per the ${BEGIN} and ${END} flags
#
# @retval list A list of TemplateString.Section objects
#
def _Parse ( self , Template ) :
SectionStart = 0
SearchFrom = 0
MatchEnd = 0
PlaceHolderList = [ ]
TemplateSectionList = [ ]
while Template :
MatchObj = gPlaceholderPattern . search ( Template , SearchFrom )
if not MatchObj :
if MatchEnd < = len ( Template ) :
TemplateSection = TemplateString . Section ( Template [ SectionStart : ] , PlaceHolderList )
TemplateSectionList . append ( TemplateSection )
break
MatchString = MatchObj . group ( 1 )
MatchStart = MatchObj . start ( )
MatchEnd = MatchObj . end ( )
if MatchString == self . _REPEAT_START_FLAG :
if MatchStart > SectionStart :
TemplateSection = TemplateString . Section ( Template [ SectionStart : MatchStart ] , PlaceHolderList )
TemplateSectionList . append ( TemplateSection )
SectionStart = MatchEnd
PlaceHolderList = [ ]
elif MatchString == self . _REPEAT_END_FLAG :
TemplateSection = TemplateString . Section ( Template [ SectionStart : MatchStart ] , PlaceHolderList )
TemplateSectionList . append ( TemplateSection )
SectionStart = MatchEnd
PlaceHolderList = [ ]
else :
PlaceHolderList . append ( ( MatchString , MatchStart - SectionStart , MatchEnd - SectionStart ) )
SearchFrom = MatchEnd
return TemplateSectionList
## Replace the string template with dictionary of placeholders and append it to previous one
#
# @param AppendString The string template to append
# @param Dictionary The placeholder dictionaries
#
def Append ( self , AppendString , Dictionary = None ) :
if Dictionary :
SectionList = self . _Parse ( AppendString )
self . String + = " " . join ( [ S . Instantiate ( Dictionary ) for S in SectionList ] )
else :
self . String + = AppendString
## Replace the string template with dictionary of placeholders
#
# @param Dictionary The placeholder dictionaries
#
# @retval str The string replaced with placeholder values
#
def Replace ( self , Dictionary = None ) :
return " " . join ( [ S . Instantiate ( Dictionary ) for S in self . _TemplateSectionList ] )
## Progress indicator class
#
# This class makes use of thread to print progress on console.
#
class Progressor :
# for avoiding deadloop
_StopFlag = None
_ProgressThread = None
_CheckInterval = 0.25
## Constructor
#
# @param OpenMessage The string printed before progress charaters
# @param CloseMessage The string printed after progress charaters
# @param ProgressChar The charater used to indicate the progress
# @param Interval The interval in seconds between two progress charaters
#
def __init__ ( self , OpenMessage = " " , CloseMessage = " " , ProgressChar = ' . ' , Interval = 1.0 ) :
self . PromptMessage = OpenMessage
self . CodaMessage = CloseMessage
self . ProgressChar = ProgressChar
self . Interval = Interval
if Progressor . _StopFlag == None :
Progressor . _StopFlag = threading . Event ( )
## Start to print progress charater
#
# @param OpenMessage The string printed before progress charaters
#
def Start ( self , OpenMessage = None ) :
if OpenMessage != None :
self . PromptMessage = OpenMessage
Progressor . _StopFlag . clear ( )
if Progressor . _ProgressThread == None :
Progressor . _ProgressThread = threading . Thread ( target = self . _ProgressThreadEntry )
Progressor . _ProgressThread . setDaemon ( False )
Progressor . _ProgressThread . start ( )
## Stop printing progress charater
#
# @param CloseMessage The string printed after progress charaters
#
def Stop ( self , CloseMessage = None ) :
OriginalCodaMessage = self . CodaMessage
if CloseMessage != None :
self . CodaMessage = CloseMessage
self . Abort ( )
self . CodaMessage = OriginalCodaMessage
## Thread entry method
def _ProgressThreadEntry ( self ) :
sys . stdout . write ( self . PromptMessage + " " )
sys . stdout . flush ( )
TimeUp = 0.0
while not Progressor . _StopFlag . isSet ( ) :
if TimeUp < = 0.0 :
sys . stdout . write ( self . ProgressChar )
sys . stdout . flush ( )
TimeUp = self . Interval
time . sleep ( self . _CheckInterval )
TimeUp - = self . _CheckInterval
sys . stdout . write ( " " + self . CodaMessage + " \n " )
sys . stdout . flush ( )
## Abort the progress display
@staticmethod
def Abort ( ) :
if Progressor . _StopFlag != None :
Progressor . _StopFlag . set ( )
if Progressor . _ProgressThread != None :
Progressor . _ProgressThread . join ( )
Progressor . _ProgressThread = None
## A dict which can access its keys and/or values orderly
#
# The class implements a new kind of dict which its keys or values can be
# accessed in the order they are added into the dict. It guarantees the order
# by making use of an internal list to keep a copy of keys.
#
class sdict ( IterableUserDict ) :
## Constructor
def __init__ ( self ) :
IterableUserDict . __init__ ( self )
self . _key_list = [ ]
## [] operator
def __setitem__ ( self , key , value ) :
if key not in self . _key_list :
self . _key_list . append ( key )
IterableUserDict . __setitem__ ( self , key , value )
## del operator
def __delitem__ ( self , key ) :
self . _key_list . remove ( key )
IterableUserDict . __delitem__ ( self , key )
## used in "for k in dict" loop to ensure the correct order
def __iter__ ( self ) :
return self . iterkeys ( )
## len() support
def __len__ ( self ) :
return len ( self . _key_list )
## "in" test support
def __contains__ ( self , key ) :
return key in self . _key_list
## indexof support
def index ( self , key ) :
return self . _key_list . index ( key )
## insert support
def insert ( self , key , newkey , newvalue , order ) :
index = self . _key_list . index ( key )
if order == ' BEFORE ' :
self . _key_list . insert ( index , newkey )
IterableUserDict . __setitem__ ( self , newkey , newvalue )
elif order == ' AFTER ' :
self . _key_list . insert ( index + 1 , newkey )
IterableUserDict . __setitem__ ( self , newkey , newvalue )
## append support
def append ( self , sdict ) :
for key in sdict :
if key not in self . _key_list :
self . _key_list . append ( key )
IterableUserDict . __setitem__ ( self , key , sdict [ key ] )
def has_key ( self , key ) :
return key in self . _key_list
## Empty the dict
def clear ( self ) :
self . _key_list = [ ]
IterableUserDict . clear ( self )
## Return a copy of keys
def keys ( self ) :
keys = [ ]
for key in self . _key_list :
keys . append ( key )
return keys
## Return a copy of values
def values ( self ) :
values = [ ]
for key in self . _key_list :
values . append ( self [ key ] )
return values
## Return a copy of (key, value) list
def items ( self ) :
items = [ ]
for key in self . _key_list :
items . append ( ( key , self [ key ] ) )
return items
## Iteration support
def iteritems ( self ) :
return iter ( self . items ( ) )
## Keys interation support
def iterkeys ( self ) :
return iter ( self . keys ( ) )
## Values interation support
def itervalues ( self ) :
return iter ( self . values ( ) )
## Return value related to a key, and remove the (key, value) from the dict
def pop ( self , key , * dv ) :
value = None
if key in self . _key_list :
value = self [ key ]
self . __delitem__ ( key )
elif len ( dv ) != 0 :
value = kv [ 0 ]
return value
## Return (key, value) pair, and remove the (key, value) from the dict
def popitem ( self ) :
key = self . _key_list [ - 1 ]
value = self [ key ]
self . __delitem__ ( key )
return key , value
def update ( self , dict = None , * * kwargs ) :
if dict != None :
for k , v in dict . items ( ) :
self [ k ] = v
if len ( kwargs ) :
for k , v in kwargs . items ( ) :
self [ k ] = v
## Dictionary with restricted keys
#
class rdict ( dict ) :
## Constructor
def __init__ ( self , KeyList ) :
for Key in KeyList :
dict . __setitem__ ( self , Key , " " )
## []= operator
def __setitem__ ( self , key , value ) :
if key not in self :
EdkLogger . error ( " RestrictedDict " , ATTRIBUTE_SET_FAILURE , " Key [ %s ] is not allowed " % key ,
ExtraData = " , " . join ( dict . keys ( self ) ) )
dict . __setitem__ ( self , key , value )
## =[] operator
def __getitem__ ( self , key ) :
if key not in self :
return " "
return dict . __getitem__ ( self , key )
## del operator
def __delitem__ ( self , key ) :
EdkLogger . error ( " RestrictedDict " , ATTRIBUTE_ACCESS_DENIED , ExtraData = " del " )
## Empty the dict
def clear ( self ) :
for Key in self :
self . __setitem__ ( Key , " " )
## Return value related to a key, and remove the (key, value) from the dict
def pop ( self , key , * dv ) :
EdkLogger . error ( " RestrictedDict " , ATTRIBUTE_ACCESS_DENIED , ExtraData = " pop " )
## Return (key, value) pair, and remove the (key, value) from the dict
def popitem ( self ) :
EdkLogger . error ( " RestrictedDict " , ATTRIBUTE_ACCESS_DENIED , ExtraData = " popitem " )
## Dictionary using prioritized list as key
#
class tdict :
_ListType = type ( [ ] )
_TupleType = type ( ( ) )
_Wildcard = ' COMMON '
_ValidWildcardList = [ ' COMMON ' , ' DEFAULT ' , ' ALL ' , ' * ' , ' PLATFORM ' ]
def __init__ ( self , _Single_ = False , _Level_ = 2 ) :
self . _Level_ = _Level_
self . data = { }
self . _Single_ = _Single_
# =[] operator
def __getitem__ ( self , key ) :
KeyType = type ( key )
RestKeys = None
if KeyType == self . _ListType or KeyType == self . _TupleType :
FirstKey = key [ 0 ]
if len ( key ) > 1 :
RestKeys = key [ 1 : ]
elif self . _Level_ > 1 :
2015-12-01 05:22:16 +01:00
RestKeys = [ self . _Wildcard for i in range ( 0 , self . _Level_ - 1 ) ]
2014-01-27 06:23:15 +01:00
else :
FirstKey = key
if self . _Level_ > 1 :
2015-12-01 05:22:16 +01:00
RestKeys = [ self . _Wildcard for i in range ( 0 , self . _Level_ - 1 ) ]
2014-01-27 06:23:15 +01:00
if FirstKey == None or str ( FirstKey ) . upper ( ) in self . _ValidWildcardList :
FirstKey = self . _Wildcard
if self . _Single_ :
return self . _GetSingleValue ( FirstKey , RestKeys )
else :
return self . _GetAllValues ( FirstKey , RestKeys )
def _GetSingleValue ( self , FirstKey , RestKeys ) :
Value = None
#print "%s-%s" % (FirstKey, self._Level_) ,
if self . _Level_ > 1 :
if FirstKey == self . _Wildcard :
if FirstKey in self . data :
Value = self . data [ FirstKey ] [ RestKeys ]
if Value == None :
for Key in self . data :
Value = self . data [ Key ] [ RestKeys ]
if Value != None : break
else :
if FirstKey in self . data :
Value = self . data [ FirstKey ] [ RestKeys ]
if Value == None and self . _Wildcard in self . data :
#print "Value=None"
Value = self . data [ self . _Wildcard ] [ RestKeys ]
else :
if FirstKey == self . _Wildcard :
if FirstKey in self . data :
Value = self . data [ FirstKey ]
if Value == None :
for Key in self . data :
Value = self . data [ Key ]
if Value != None : break
else :
if FirstKey in self . data :
Value = self . data [ FirstKey ]
elif self . _Wildcard in self . data :
Value = self . data [ self . _Wildcard ]
return Value
def _GetAllValues ( self , FirstKey , RestKeys ) :
Value = [ ]
if self . _Level_ > 1 :
if FirstKey == self . _Wildcard :
for Key in self . data :
Value + = self . data [ Key ] [ RestKeys ]
else :
if FirstKey in self . data :
Value + = self . data [ FirstKey ] [ RestKeys ]
if self . _Wildcard in self . data :
Value + = self . data [ self . _Wildcard ] [ RestKeys ]
else :
if FirstKey == self . _Wildcard :
for Key in self . data :
Value . append ( self . data [ Key ] )
else :
if FirstKey in self . data :
Value . append ( self . data [ FirstKey ] )
if self . _Wildcard in self . data :
Value . append ( self . data [ self . _Wildcard ] )
return Value
## []= operator
def __setitem__ ( self , key , value ) :
KeyType = type ( key )
RestKeys = None
if KeyType == self . _ListType or KeyType == self . _TupleType :
FirstKey = key [ 0 ]
if len ( key ) > 1 :
RestKeys = key [ 1 : ]
else :
2015-12-01 05:22:16 +01:00
RestKeys = [ self . _Wildcard for i in range ( 0 , self . _Level_ - 1 ) ]
2014-01-27 06:23:15 +01:00
else :
FirstKey = key
if self . _Level_ > 1 :
2015-12-01 05:22:16 +01:00
RestKeys = [ self . _Wildcard for i in range ( 0 , self . _Level_ - 1 ) ]
2014-01-27 06:23:15 +01:00
if FirstKey in self . _ValidWildcardList :
FirstKey = self . _Wildcard
if FirstKey not in self . data and self . _Level_ > 0 :
self . data [ FirstKey ] = tdict ( self . _Single_ , self . _Level_ - 1 )
if self . _Level_ > 1 :
self . data [ FirstKey ] [ RestKeys ] = value
else :
self . data [ FirstKey ] = value
def SetGreedyMode ( self ) :
self . _Single_ = False
if self . _Level_ > 1 :
for Key in self . data :
self . data [ Key ] . SetGreedyMode ( )
def SetSingleMode ( self ) :
self . _Single_ = True
if self . _Level_ > 1 :
for Key in self . data :
self . data [ Key ] . SetSingleMode ( )
def GetKeys ( self , KeyIndex = 0 ) :
assert KeyIndex > = 0
if KeyIndex == 0 :
return set ( self . data . keys ( ) )
else :
keys = set ( )
for Key in self . data :
keys | = self . data [ Key ] . GetKeys ( KeyIndex - 1 )
return keys
## Boolean chain list
#
class Blist ( UserList ) :
def __init__ ( self , initlist = None ) :
UserList . __init__ ( self , initlist )
def __setitem__ ( self , i , item ) :
if item not in [ True , False ] :
if item == 0 :
item = False
else :
item = True
self . data [ i ] = item
def _GetResult ( self ) :
Value = True
for item in self . data :
Value & = item
return Value
Result = property ( _GetResult )
def ParseConsoleLog ( Filename ) :
Opr = open ( os . path . normpath ( Filename ) , ' r ' )
Opw = open ( os . path . normpath ( Filename + ' .New ' ) , ' w+ ' )
for Line in Opr . readlines ( ) :
if Line . find ( ' .efi ' ) > - 1 :
Line = Line [ Line . rfind ( ' ' ) : Line . rfind ( ' .efi ' ) ] . strip ( )
Opw . write ( ' %s \n ' % Line )
Opr . close ( )
Opw . close ( )
2016-10-12 07:28:36 +02:00
def AnalyzePcdExpression ( Setting ) :
2014-01-27 06:23:15 +01:00
Setting = Setting . strip ( )
# There might be escaped quote in a string: \", \\\"
Data = Setting . replace ( ' \\ \\ ' , ' // ' ) . replace ( ' \\ \" ' , ' \\ \' ' )
# There might be '|' in string and in ( ... | ... ), replace it with '-'
NewStr = ' '
InStr = False
Pair = 0
for ch in Data :
if ch == ' " ' :
InStr = not InStr
elif ch == ' ( ' and not InStr :
Pair + = 1
elif ch == ' ) ' and not InStr :
Pair - = 1
2015-12-01 05:22:16 +01:00
2014-01-27 06:23:15 +01:00
if ( Pair > 0 or InStr ) and ch == TAB_VALUE_SPLIT :
NewStr + = ' - '
else :
NewStr + = ch
FieldList = [ ]
StartPos = 0
while True :
Pos = NewStr . find ( TAB_VALUE_SPLIT , StartPos )
if Pos < 0 :
FieldList . append ( Setting [ StartPos : ] . strip ( ) )
break
FieldList . append ( Setting [ StartPos : Pos ] . strip ( ) )
StartPos = Pos + 1
2016-10-12 07:28:36 +02:00
return FieldList
## AnalyzeDscPcd
#
# Analyze DSC PCD value, since there is no data type info in DSC
# This fuction is used to match functions (AnalyzePcdData, AnalyzeHiiPcdData, AnalyzeVpdPcdData) used for retrieving PCD value from database
# 1. Feature flag: TokenSpace.PcdCName|PcdValue
# 2. Fix and Patch:TokenSpace.PcdCName|PcdValue[|MaxSize]
# 3. Dynamic default:
# TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# TokenSpace.PcdCName|PcdValue
# 4. Dynamic VPD:
# TokenSpace.PcdCName|VpdOffset[|VpdValue]
# TokenSpace.PcdCName|VpdOffset[|MaxSize[|VpdValue]]
# 5. Dynamic HII:
# TokenSpace.PcdCName|HiiString|VaiableGuid|VariableOffset[|HiiValue]
# PCD value needs to be located in such kind of string, and the PCD value might be an expression in which
# there might have "|" operator, also in string value.
#
# @param Setting: String contain information described above with "TokenSpace.PcdCName|" stripped
# @param PcdType: PCD type: feature, fixed, dynamic default VPD HII
# @param DataType: The datum type of PCD: VOID*, UNIT, BOOL
# @retval:
# ValueList: A List contain fields described above
# IsValid: True if conforming EBNF, otherwise False
# Index: The index where PcdValue is in ValueList
#
def AnalyzeDscPcd ( Setting , PcdType , DataType = ' ' ) :
FieldList = AnalyzePcdExpression ( Setting )
2014-01-27 06:23:15 +01:00
IsValid = True
if PcdType in ( MODEL_PCD_FIXED_AT_BUILD , MODEL_PCD_PATCHABLE_IN_MODULE , MODEL_PCD_FEATURE_FLAG ) :
Value = FieldList [ 0 ]
Size = ' '
if len ( FieldList ) > 1 :
Type = FieldList [ 1 ]
# Fix the PCD type when no DataType input
if Type == ' VOID* ' :
DataType = ' VOID* '
else :
Size = FieldList [ 1 ]
if len ( FieldList ) > 2 :
Size = FieldList [ 2 ]
if DataType == ' VOID* ' :
IsValid = ( len ( FieldList ) < = 3 )
else :
IsValid = ( len ( FieldList ) < = 1 )
return [ Value , ' ' , Size ] , IsValid , 0
elif PcdType in ( MODEL_PCD_DYNAMIC_DEFAULT , MODEL_PCD_DYNAMIC_EX_DEFAULT ) :
Value = FieldList [ 0 ]
Size = Type = ' '
if len ( FieldList ) > 1 :
Type = FieldList [ 1 ]
else :
Type = DataType
if len ( FieldList ) > 2 :
Size = FieldList [ 2 ]
else :
if Type == ' VOID* ' :
if Value . startswith ( " L " ) :
Size = str ( ( len ( Value ) - 3 + 1 ) * 2 )
elif Value . startswith ( " { " ) :
Size = str ( len ( Value . split ( " , " ) ) )
else :
Size = str ( len ( Value ) - 2 + 1 )
if DataType == ' VOID* ' :
IsValid = ( len ( FieldList ) < = 3 )
else :
IsValid = ( len ( FieldList ) < = 1 )
2015-12-01 05:22:16 +01:00
return [ Value , Type , Size ] , IsValid , 0
2014-01-27 06:23:15 +01:00
elif PcdType in ( MODEL_PCD_DYNAMIC_VPD , MODEL_PCD_DYNAMIC_EX_VPD ) :
VpdOffset = FieldList [ 0 ]
Value = Size = ' '
if not DataType == ' VOID* ' :
if len ( FieldList ) > 1 :
Value = FieldList [ 1 ]
else :
if len ( FieldList ) > 1 :
Size = FieldList [ 1 ]
if len ( FieldList ) > 2 :
Value = FieldList [ 2 ]
if DataType == ' VOID* ' :
IsValid = ( len ( FieldList ) < = 3 )
else :
IsValid = ( len ( FieldList ) < = 2 )
return [ VpdOffset , Size , Value ] , IsValid , 2
elif PcdType in ( MODEL_PCD_DYNAMIC_HII , MODEL_PCD_DYNAMIC_EX_HII ) :
HiiString = FieldList [ 0 ]
2015-04-10 08:59:47 +02:00
Guid = Offset = Value = Attribute = ' '
2014-01-27 06:23:15 +01:00
if len ( FieldList ) > 1 :
Guid = FieldList [ 1 ]
if len ( FieldList ) > 2 :
Offset = FieldList [ 2 ]
if len ( FieldList ) > 3 :
Value = FieldList [ 3 ]
2015-04-10 08:59:47 +02:00
if len ( FieldList ) > 4 :
Attribute = FieldList [ 4 ]
IsValid = ( 3 < = len ( FieldList ) < = 5 )
return [ HiiString , Guid , Offset , Value , Attribute ] , IsValid , 3
2014-01-27 06:23:15 +01:00
return [ ] , False , 0
## AnalyzePcdData
#
# Analyze the pcd Value, Datum type and TokenNumber.
# Used to avoid split issue while the value string contain "|" character
#
# @param[in] Setting: A String contain value/datum type/token number information;
#
# @retval ValueList: A List contain value, datum type and toke number.
#
2015-12-01 05:22:16 +01:00
def AnalyzePcdData ( Setting ) :
ValueList = [ ' ' , ' ' , ' ' ]
ValueRe = re . compile ( r ' ^ \ s*L? \ " .* \ |.* \ " ' )
2014-01-27 06:23:15 +01:00
PtrValue = ValueRe . findall ( Setting )
ValueUpdateFlag = False
if len ( PtrValue ) > = 1 :
Setting = re . sub ( ValueRe , ' ' , Setting )
2015-12-01 05:22:16 +01:00
ValueUpdateFlag = True
2014-01-27 06:23:15 +01:00
TokenList = Setting . split ( TAB_VALUE_SPLIT )
ValueList [ 0 : len ( TokenList ) ] = TokenList
if ValueUpdateFlag :
ValueList [ 0 ] = PtrValue [ 0 ]
return ValueList
## AnalyzeHiiPcdData
#
# Analyze the pcd Value, variable name, variable Guid and variable offset.
# Used to avoid split issue while the value string contain "|" character
#
# @param[in] Setting: A String contain VariableName, VariableGuid, VariableOffset, DefaultValue information;
#
# @retval ValueList: A List contaian VariableName, VariableGuid, VariableOffset, DefaultValue.
#
def AnalyzeHiiPcdData ( Setting ) :
ValueList = [ ' ' , ' ' , ' ' , ' ' ]
TokenList = GetSplitValueList ( Setting )
ValueList [ 0 : len ( TokenList ) ] = TokenList
return ValueList
## AnalyzeVpdPcdData
#
# Analyze the vpd pcd VpdOffset, MaxDatumSize and InitialValue.
# Used to avoid split issue while the value string contain "|" character
#
# @param[in] Setting: A String contain VpdOffset/MaxDatumSize/InitialValue information;
#
# @retval ValueList: A List contain VpdOffset, MaxDatumSize and InitialValue.
#
2015-12-01 05:22:16 +01:00
def AnalyzeVpdPcdData ( Setting ) :
ValueList = [ ' ' , ' ' , ' ' ]
ValueRe = re . compile ( r ' \ s*L? \ " .* \ |.* \ " \ s*$ ' )
2014-01-27 06:23:15 +01:00
PtrValue = ValueRe . findall ( Setting )
ValueUpdateFlag = False
if len ( PtrValue ) > = 1 :
Setting = re . sub ( ValueRe , ' ' , Setting )
2015-12-01 05:22:16 +01:00
ValueUpdateFlag = True
2014-01-27 06:23:15 +01:00
TokenList = Setting . split ( TAB_VALUE_SPLIT )
ValueList [ 0 : len ( TokenList ) ] = TokenList
if ValueUpdateFlag :
ValueList [ 2 ] = PtrValue [ 0 ]
return ValueList
## check format of PCD value against its the datum type
#
# For PCD value setting
#
def CheckPcdDatum ( Type , Value ) :
if Type == " VOID* " :
2015-12-01 05:22:16 +01:00
ValueRe = re . compile ( r ' \ s*L? \ " .* \ " \ s*$ ' )
2014-01-27 06:23:15 +01:00
if not ( ( ( Value . startswith ( ' L " ' ) or Value . startswith ( ' " ' ) ) and Value . endswith ( ' " ' ) )
or ( Value . startswith ( ' { ' ) and Value . endswith ( ' } ' ) )
) :
return False , " Invalid value [ %s ] of type [ %s ]; must be in the form of { ...} for array " \
2015-12-01 05:22:16 +01:00
" , or \" ... \" for string, or L \" ... \" for unicode string " % ( Value , Type )
2014-01-27 06:23:15 +01:00
elif ValueRe . match ( Value ) :
# Check the chars in UnicodeString or CString is printable
if Value . startswith ( " L " ) :
Value = Value [ 2 : - 1 ]
else :
Value = Value [ 1 : - 1 ]
Printset = set ( string . printable )
Printset . remove ( TAB_PRINTCHAR_VT )
Printset . add ( TAB_PRINTCHAR_BS )
Printset . add ( TAB_PRINTCHAR_NUL )
if not set ( Value ) . issubset ( Printset ) :
PrintList = list ( Printset )
PrintList . sort ( )
return False , " Invalid PCD string value of type [ %s ]; must be printable chars %s . " % ( Type , PrintList )
elif Type == ' BOOLEAN ' :
if Value not in [ ' TRUE ' , ' True ' , ' true ' , ' 0x1 ' , ' 0x01 ' , ' 1 ' , ' FALSE ' , ' False ' , ' false ' , ' 0x0 ' , ' 0x00 ' , ' 0 ' ] :
return False , " Invalid value [ %s ] of type [ %s ]; must be one of TRUE, True, true, 0x1, 0x01, 1 " \
" , FALSE, False, false, 0x0, 0x00, 0 " % ( Value , Type )
elif Type in [ TAB_UINT8 , TAB_UINT16 , TAB_UINT32 , TAB_UINT64 ] :
try :
Value = long ( Value , 0 )
except :
return False , " Invalid value [ %s ] of type [ %s ]; " \
" must be a hexadecimal, decimal or octal in C language format. " % ( Value , Type )
else :
return False , " Invalid type [ %s ]; must be one of VOID*, BOOLEAN, UINT8, UINT16, UINT32, UINT64. " % ( Type )
return True , " "
## Split command line option string to list
#
# subprocess.Popen needs the args to be a sequence. Otherwise there's problem
# in non-windows platform to launch command
#
def SplitOption(OptionString):
    OptionList = []
    LastChar = " "
    OptionStart = 0
    QuotationMark = ""
    for Index in range(0, len(OptionString)):
        CurrentChar = OptionString[Index]
        if CurrentChar in ['"', "'"]:
            if QuotationMark == CurrentChar:
                QuotationMark = ""
            elif QuotationMark == "":
                QuotationMark = CurrentChar
            continue
        elif QuotationMark:
            continue

        if CurrentChar in ["/", "-"] and LastChar in [" ", "\t", "\r", "\n"]:
            if Index > OptionStart:
                OptionList.append(OptionString[OptionStart:Index - 1])
            OptionStart = Index
        LastChar = CurrentChar
    OptionList.append(OptionString[OptionStart:])
    return OptionList
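
## Illustrative sketch, not used by the build tools: SplitOption turns a flat tool
#  command line into the argument sequence expected by subprocess.Popen. The option
#  string below is a made-up example, not a real tool invocation.
#
def _ExampleSplitOption():
    Args = SplitOption("/nologo /dump -o output.bin")
    assert Args[0] == "/nologo"
    return Args
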
def CommonPath(PathList):
    P1 = min(PathList).split(os.path.sep)
    P2 = max(PathList).split(os.path.sep)
    for Index in xrange(min(len(P1), len(P2))):
        if P1[Index] != P2[Index]:
            return os.path.sep.join(P1[:Index])
    return os.path.sep.join(P1)
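
## Illustrative sketch, not used by the build tools: CommonPath returns the longest
#  common leading directory of a path list. The package paths below are hypothetical.
#
def _ExampleCommonPath():
    Sep = os.path.sep
    Paths = [Sep.join(['', 'work', 'edk2', 'MdePkg']),
             Sep.join(['', 'work', 'edk2', 'MdeModulePkg'])]
    assert CommonPath(Paths) == Sep.join(['', 'work', 'edk2'])
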
#
# Convert string to C format array
#
def ConvertStringToByteArray(Value):
    Value = Value.strip()
    if not Value:
        return None
    if Value[0] == '{':
        if not Value.endswith('}'):
            return None
        Value = Value.replace(' ', '').replace('{', '').replace('}', '')
        ValFields = Value.split(',')
        try:
            for Index in range(len(ValFields)):
                ValFields[Index] = str(int(ValFields[Index], 0))
        except ValueError:
            return None
        Value = '{' + ','.join(ValFields) + '}'
        return Value

    Unicode = False
    if Value.startswith('L"'):
        if not Value.endswith('"'):
            return None
        Value = Value[1:]
        Unicode = True
    elif not Value.startswith('"') or not Value.endswith('"'):
        return None

    Value = eval(Value)         # translate escape character
    NewValue = '{'
    for Index in range(0, len(Value)):
        if Unicode:
            NewValue = NewValue + str(ord(Value[Index]) % 0x10000) + ','
        else:
            NewValue = NewValue + str(ord(Value[Index]) % 0x100) + ','
    Value = NewValue + '0}'
    return Value
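
## Illustrative sketch, not used by the build tools: the input shapes accepted by
#  ConvertStringToByteArray and the C-style array text each one produces.
#
def _ExampleConvertStringToByteArray():
    assert ConvertStringToByteArray('"AB"') == '{65,66,0}'       # ASCII codes plus a terminating 0
    assert ConvertStringToByteArray('{0x01, 0x02}') == '{1,2}'   # byte array is re-emitted in decimal
    assert ConvertStringToByteArray('Bad') is None               # unquoted input is rejected
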
class PathClass(object):
    def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
                 Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
        self.Arch = Arch
        self.File = str(File)
        if os.path.isabs(self.File):
            self.Root = ''
            self.AlterRoot = ''
        else:
            self.Root = str(Root)
            self.AlterRoot = str(AlterRoot)

        # Remove any '.' and '..' in path
        if self.Root:
            self.Root = mws.getWs(self.Root, self.File)
            self.Path = os.path.normpath(os.path.join(self.Root, self.File))
            self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
            # eliminate the side-effect of 'C:'
            if self.Root[-1] == ':':
                self.Root += os.path.sep
            # file path should not start with path separator
            if self.Root[-1] == os.path.sep:
                self.File = self.Path[len(self.Root):]
            else:
                self.File = self.Path[len(self.Root) + 1:]
        else:
            self.Path = os.path.normpath(self.File)

        self.SubDir, self.Name = os.path.split(self.File)
        self.BaseName, self.Ext = os.path.splitext(self.Name)

        if self.Root:
            if self.SubDir:
                self.Dir = os.path.join(self.Root, self.SubDir)
            else:
                self.Dir = self.Root
        else:
            self.Dir = self.SubDir

        if IsBinary:
            self.Type = Type
        else:
            self.Type = self.Ext.lower()

        self.IsBinary = IsBinary
        self.Target = Target
        self.TagName = TagName
        self.ToolCode = ToolCode
        self.ToolChainFamily = ToolChainFamily

        self._Key = None

    ## Convert the object of this class to a string
    #
    #  Convert member Path of the class to a string
    #
    #  @retval string Formatted String
    #
    def __str__(self):
        return self.Path

    ## Override __eq__ function
    #
    # Check whether two PathClass objects are the same
    #
    # @retval False The two PathClass objects are different
    # @retval True  The two PathClass objects are the same
    #
    def __eq__(self, Other):
        if type(Other) == type(self):
            return self.Path == Other.Path
        else:
            return self.Path == str(Other)

    ## Override __cmp__ function
    #
    # Customize the comparison operation of two PathClass objects
    #
    # @retval 0   The two PathClass objects are equal
    # @retval -1  The first PathClass is less than the second PathClass
    # @retval 1   The first PathClass is greater than the second PathClass
    #
    def __cmp__(self, Other):
        if type(Other) == type(self):
            OtherKey = Other.Path
        else:
            OtherKey = str(Other)

        SelfKey = self.Path
        if SelfKey == OtherKey:
            return 0
        elif SelfKey > OtherKey:
            return 1
        else:
            return -1

    ## Override __hash__ function
    #
    # Use Path as the key in hash tables
    #
    # @retval string Key for hash table
    #
    def __hash__(self):
        return hash(self.Path)

    def _GetFileKey(self):
        if self._Key == None:
            self._Key = self.Path.upper()   # + self.ToolChainFamily + self.TagName + self.ToolCode + self.Target
        return self._Key

    def _GetTimeStamp(self):
        return os.stat(self.Path)[8]

    def Validate(self, Type='', CaseSensitive=True):
        if GlobalData.gCaseInsensitive:
            CaseSensitive = False
        if Type and Type.lower() != self.Type:
            return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)

        RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
        if not RealRoot and not RealFile:
            RealFile = self.File
            if self.AlterRoot:
                RealFile = os.path.join(self.AlterRoot, self.File)
            elif self.Root:
                RealFile = os.path.join(self.Root, self.File)
            if len(mws.getPkgPath()) == 0:
                return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
            else:
                return FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.File, '\n\t'.join(mws.getPkgPath()))

        ErrorCode = 0
        ErrorInfo = ''
        if RealRoot != self.Root or RealFile != self.File:
            if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
                ErrorCode = FILE_CASE_MISMATCH
                ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"

            self.SubDir, self.Name = os.path.split(RealFile)
            self.BaseName, self.Ext = os.path.splitext(self.Name)
            if self.SubDir:
                self.Dir = os.path.join(RealRoot, self.SubDir)
            else:
                self.Dir = RealRoot
            self.File = RealFile
            self.Root = RealRoot
            self.Path = os.path.join(RealRoot, RealFile)
        return ErrorCode, ErrorInfo

    Key = property(_GetFileKey)
    TimeStamp = property(_GetTimeStamp)
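
## Illustrative sketch, not used by the build tools: PathClass objects hash and compare
#  by Path, so they can be used directly as dictionary keys. The .inf path below is
#  hypothetical.
#
def _ExamplePathClass():
    Source = PathClass(os.path.join(os.getcwd(), 'Library', 'BaseLib.inf'))
    # Name/BaseName/Ext/Dir are split in __init__; Key upper-cases Path for lookups.
    assert Source.BaseName == 'BaseLib' and Source.Ext == '.inf'
    assert hash(Source) == hash(Source.Path)
    return {Source: Source.Type}
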
## Parse a PE image to get the required PE information.
#
class PeImageClass():
    ## Constructor
    #
    #   @param  PeFile   Full path of the PE image file
    #
    def __init__(self, PeFile):
        self.FileName   = PeFile
        self.IsValid    = False
        self.Size       = 0
        self.EntryPoint = 0
        self.SectionAlignment  = 0
        self.SectionHeaderList = []
        self.ErrorInfo = ''
        try:
            PeObject = open(PeFile, 'rb')
        except:
            self.ErrorInfo = self.FileName + ' can not be found\n'
            return
        # Read DOS header
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, 0x3E)
        ByteList = ByteArray.tolist()
        # DOS signature should be 'MZ'
        if self._ByteListToStr(ByteList[0x0:0x2]) != 'MZ':
            self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
            return

        # Read 4 byte PE Signature
        PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
        PeObject.seek(PeOffset)
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, 4)
        # PE signature should be 'PE\0\0'
        if ByteArray.tostring() != 'PE\0\0':
            self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
            return

        # Read PE file header
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, 0x14)
        ByteList = ByteArray.tolist()
        SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
        if SecNumber == 0:
            self.ErrorInfo = self.FileName + ' has no section header'
            return

        # Read PE optional header
        OptionalHeaderSize = self._ByteListToInt(ByteArray[0x10:0x12])
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, OptionalHeaderSize)
        ByteList = ByteArray.tolist()
        self.EntryPoint       = self._ByteListToInt(ByteList[0x10:0x14])
        self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
        self.Size             = self._ByteListToInt(ByteList[0x38:0x3C])

        # Read each Section Header
        for Index in range(SecNumber):
            ByteArray = array.array('B')
            ByteArray.fromfile(PeObject, 0x28)
            ByteList = ByteArray.tolist()
            SecName           = self._ByteListToStr(ByteList[0:8])
            SecVirtualSize    = self._ByteListToInt(ByteList[8:12])
            SecRawAddress     = self._ByteListToInt(ByteList[20:24])
            SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
            self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
        self.IsValid = True
        PeObject.close()

    def _ByteListToStr(self, ByteList):
        String = ''
        for index in range(len(ByteList)):
            if ByteList[index] == 0:
                break
            String += chr(ByteList[index])
        return String

    def _ByteListToInt(self, ByteList):
        Value = 0
        for index in range(len(ByteList) - 1, -1, -1):
            Value = (Value << 8) | int(ByteList[index])
        return Value
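
## Illustrative sketch, not used by the build tools: PeImageClass never raises; callers
#  are expected to test IsValid and report ErrorInfo. The image path is hypothetical.
#
def _ExamplePeImageClass():
    Image = PeImageClass(os.path.join('Build', 'Example.efi'))
    if not Image.IsValid:
        return Image.ErrorInfo
    # SectionHeaderList entries are (Name, VirtualAddress, RawAddress, VirtualSize) tuples.
    return Image.EntryPoint, Image.SectionAlignment, Image.SectionHeaderList
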
class SkuClass():
    DEFAULT = 0
    SINGLE = 1
    MULTIPLE = 2

    def __init__(self, SkuIdentifier='', SkuIds={}):
        self.AvailableSkuIds = sdict()
        self.SkuIdSet = []
        self.SkuIdNumberSet = []
        if SkuIdentifier == '' or SkuIdentifier is None:
            self.SkuIdSet = ['DEFAULT']
            self.SkuIdNumberSet = ['0U']
        elif SkuIdentifier == 'ALL':
            self.SkuIdSet = SkuIds.keys()
            self.SkuIdNumberSet = [num.strip() + 'U' for num in SkuIds.values()]
        else:
            r = SkuIdentifier.split('|')
            self.SkuIdSet = [r[k].strip() for k in range(len(r))]
            k = None
            try:
                self.SkuIdNumberSet = [SkuIds[k].strip() + 'U' for k in self.SkuIdSet]
            except Exception:
                EdkLogger.error("build", PARAMETER_INVALID,
                            ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
                                      % (k, " | ".join(SkuIds.keys())))
        if len(self.SkuIdSet) == 2 and 'DEFAULT' in self.SkuIdSet and SkuIdentifier != 'ALL':
            self.SkuIdSet.remove('DEFAULT')
            self.SkuIdNumberSet.remove('0U')
        for each in self.SkuIdSet:
            if each in SkuIds:
                self.AvailableSkuIds[each] = SkuIds[each]
            else:
                EdkLogger.error("build", PARAMETER_INVALID,
                            ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
                                      % (each, " | ".join(SkuIds.keys())))

    def __SkuUsageType(self):
        if len(self.SkuIdSet) == 1:
            if self.SkuIdSet[0] == 'DEFAULT':
                return SkuClass.DEFAULT
            else:
                return SkuClass.SINGLE
        else:
            return SkuClass.MULTIPLE

    def __GetAvailableSkuIds(self):
        return self.AvailableSkuIds

    def __GetSystemSkuID(self):
        if self.__SkuUsageType() == SkuClass.SINGLE:
            return self.SkuIdSet[0]
        else:
            return 'DEFAULT'

    def __GetAvailableSkuIdNumber(self):
        return self.SkuIdNumberSet

    SystemSkuId = property(__GetSystemSkuID)
    AvailableSkuIdSet = property(__GetAvailableSkuIds)
    SkuUsageType = property(__SkuUsageType)
    AvailableSkuIdNumSet = property(__GetAvailableSkuIdNumber)
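
## Illustrative sketch, not used by the build tools: how a DSC SKUID_IDENTIFIER string
#  and the [SkuIds] section map onto SkuClass. The SKU names and numbers are made up.
#
def _ExampleSkuClass():
    SkuIds = {'DEFAULT': '0', 'SKU_A': '1', 'SKU_B': '2'}
    Sku = SkuClass('SKU_A|SKU_B', SkuIds)
    assert Sku.SkuUsageType == SkuClass.MULTIPLE
    assert sorted(Sku.AvailableSkuIdSet.keys()) == ['SKU_A', 'SKU_B']
    assert Sku.SystemSkuId == 'DEFAULT'
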
#
# Pack a registry format GUID
#
def PackRegistryFormatGuid(Guid):
    Guid = Guid.split('-')
    return pack('=LHHBBBBBBBB',
                int(Guid[0], 16),
                int(Guid[1], 16),
                int(Guid[2], 16),
                int(Guid[3][-4:-2], 16),
                int(Guid[3][-2:], 16),
                int(Guid[4][-12:-10], 16),
                int(Guid[4][-10:-8], 16),
                int(Guid[4][-8:-6], 16),
                int(Guid[4][-6:-4], 16),
                int(Guid[4][-4:-2], 16),
                int(Guid[4][-2:], 16)
                )
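
## Illustrative sketch, not used by the build tools: a registry format GUID string packs
#  into a 16-byte structure (one 32-bit field, two 16-bit fields, eight bytes). The zero
#  GUID is used so the expected result does not depend on byte order.
#
def _ExamplePackRegistryFormatGuid():
    Packed = PackRegistryFormatGuid('00000000-0000-0000-0000-000000000000')
    assert len(Packed) == 16 and Packed == '\x00' * 16
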
def BuildOptionPcdValueFormat(TokenSpaceGuidCName, TokenCName, PcdDatumType, Value):
    if PcdDatumType == 'VOID*':
        if Value.startswith('L'):
            if not Value[1]:
                EdkLogger.error("build", FORMAT_INVALID, 'For VOID* type PCD, when specifying the value on the command line, please use one of the following formats: "string", L"string", H"{...}"')
            Value = Value[0] + '"' + Value[1:] + '"'
        elif Value.startswith('H'):
            if not Value[1]:
                EdkLogger.error("build", FORMAT_INVALID, 'For VOID* type PCD, when specifying the value on the command line, please use one of the following formats: "string", L"string", H"{...}"')
            Value = Value[1:]
        else:
            if not Value[0]:
                EdkLogger.error("build", FORMAT_INVALID, 'For VOID* type PCD, when specifying the value on the command line, please use one of the following formats: "string", L"string", H"{...}"')
            Value = '"' + Value + '"'

    IsValid, Cause = CheckPcdDatum(PcdDatumType, Value)
    if not IsValid:
        EdkLogger.error("build", FORMAT_INVALID, Cause, ExtraData="%s.%s" % (TokenSpaceGuidCName, TokenCName))
    if PcdDatumType == 'BOOLEAN':
        Value = Value.upper()
        if Value == 'TRUE' or Value == '1':
            Value = '1'
        elif Value == 'FALSE' or Value == '0':
            Value = '0'
    return Value
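
## Illustrative sketch, not used by the build tools: normalization applied to PCD values
#  given on the build command line. The token space GUID and token names are placeholders.
#
def _ExampleBuildOptionPcdValueFormat():
    assert BuildOptionPcdValueFormat('gExampleTokenSpaceGuid', 'PcdExampleFlag', 'BOOLEAN', 'TRUE') == '1'
    assert BuildOptionPcdValueFormat('gExampleTokenSpaceGuid', 'PcdExampleString', 'VOID*', 'hello') == '"hello"'
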
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
    pass