## @file
# process FD Region generation
#
# Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#

##
# Import Modules
#
from struct import *
from GenFdsGlobalVariable import GenFdsGlobalVariable
import StringIO
import string
from CommonDataClass.FdfClass import RegionClassObject
import Common.LongFilePathOs as os
from stat import *
from Common import EdkLogger
from Common.BuildToolError import *
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws

## generate Region
#
#
class Region(RegionClassObject):

    ## The constructor
    #
    #   @param  self        The object pointer
    #
    def __init__(self):
        RegionClassObject.__init__(self)

    ## PadBuffer()
    #
    #   Add padding bytes to the Buffer
    #
    #   @param Buffer        The buffer the generated region data will be put in
    #   @param ErasePolarity Flash erase polarity
    #   @param Size          Number of padding bytes requested
    #
    def PadBuffer(self, Buffer, ErasePolarity, Size):
        if Size > 0:
            if ErasePolarity == '1':
                PadByte = pack('B', 0xFF)
            else:
                PadByte = pack('B', 0)
            PadData = ''.join(PadByte for i in xrange(0, Size))
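            # The pad data is built locally and written with a single
            # Buffer.write() call; packing and writing one byte per iteration
            # would be far slower for multi-megabyte regions.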
            Buffer.write(PadData)

    ## AddToBuffer()
    #
    #   Add region data to the Buffer
    #
    #   @param  self          The object pointer
    #   @param  Buffer        The buffer the generated region data will be put in
    #   @param  BaseAddress   Base address of the FD that contains this region
    #   @param  BlockSizeList List of (BlockSize, BlockNum, pcd) entries describing the FD blocks
    #   @param  ErasePolarity Flash erase polarity
    #   @param  ImageBinDict  Dictionary of pre-generated FV/Capsule image file paths
    #   @param  vtfDict       VTF objects
    #   @param  MacroDict     Macro name/value pairs
    #
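    #   A minimal usage sketch (hypothetical values, for illustration only):
    #
    #       Buffer = StringIO.StringIO('')
    #       RegionObj.AddToBuffer(Buffer, '0xFFC00000', BlockSizeList,
    #                             ErasePolarity, ImageBinDict, vtfDict, MacroDict)
    #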
    def AddToBuffer(self, Buffer, BaseAddress, BlockSizeList, ErasePolarity, ImageBinDict, vtfDict=None, MacroDict={}):
        Size = self.Size
        GenFdsGlobalVariable.InfLogger('\nGenerate Region at Offset 0x%X' % self.Offset)
        GenFdsGlobalVariable.InfLogger("   Region Size = 0x%X" % Size)
        GenFdsGlobalVariable.SharpCounter = 0

        if self.RegionType == 'FV':
            #
            # Get Fv from FvDict
            #
            self.FvAddress = int(BaseAddress, 16) + self.Offset
            FvBaseAddress = '0x%X' % self.FvAddress
            FvOffset = 0
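            # FvOffset tracks how many bytes have already been placed into this
            # region, so each additional FV written here gets a base address
            # shifted by the data that precedes it.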
            for RegionData in self.RegionDataList:
                FileName = None
                if RegionData.endswith(".fv"):
                    RegionData = GenFdsGlobalVariable.MacroExtend(RegionData, MacroDict)
                    GenFdsGlobalVariable.InfLogger('   Region FV File Name = .fv : %s' % RegionData)
                    if RegionData[1] != ':':
                        RegionData = mws.join(GenFdsGlobalVariable.WorkSpaceDir, RegionData)
                    if not os.path.exists(RegionData):
                        EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=RegionData)

                    FileName = RegionData
                elif RegionData.upper() + 'fv' in ImageBinDict.keys():
                    GenFdsGlobalVariable.InfLogger('   Region Name = FV')
                    FileName = ImageBinDict[RegionData.upper() + 'fv']
                else:
                    #
                    # Generate FvImage.
                    #
                    FvObj = None
                    if RegionData.upper() in GenFdsGlobalVariable.FdfParser.Profile.FvDict.keys():
                        FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict.get(RegionData.upper())

                    if FvObj is not None:
                        GenFdsGlobalVariable.InfLogger('   Region Name = FV')
                        #
                        # Call GenFv tool
                        #
                        self.BlockInfoOfRegion(BlockSizeList, FvObj)
                        self.FvAddress = self.FvAddress + FvOffset
                        FvAlignValue = self.GetFvAlignValue(FvObj.FvAlignment)
                        if self.FvAddress % FvAlignValue != 0:
                            EdkLogger.error("GenFds", GENFDS_ERROR,
                                            "FV (%s) is NOT %s Aligned!" % (FvObj.UiFvName, FvObj.FvAlignment))
                        FvBuffer = StringIO.StringIO('')
                        FvBaseAddress = '0x%X' % self.FvAddress
                        BlockSize = None
                        BlockNum = None
                        FvObj.AddToBuffer(FvBuffer, FvBaseAddress, BlockSize, BlockNum, ErasePolarity, vtfDict)
                        if FvBuffer.len > Size:
                            FvBuffer.close()
                            EdkLogger.error("GenFds", GENFDS_ERROR,
                                            "Size of FV (%s) is larger than Region Size 0x%X specified." % (RegionData, Size))
                        #
                        # Put the generated image into FD buffer.
                        #
                        Buffer.write(FvBuffer.getvalue())
                        FvBuffer.close()
                        FvOffset = FvOffset + FvBuffer.len
                        Size = Size - FvBuffer.len
                        continue
                    else:
                        EdkLogger.error("GenFds", GENFDS_ERROR, "FV (%s) is NOT described in FDF file!" % (RegionData))
                #
                # Add the existing FV image into the FD buffer
                #
                if FileName is not None:
                    FileLength = os.stat(FileName)[ST_SIZE]
                    if FileLength > Size:
                        EdkLogger.error("GenFds", GENFDS_ERROR,
                                        "Size of FV File (%s) is larger than Region Size 0x%X specified." \
                                        % (RegionData, Size))
                    BinFile = open(FileName, 'rb')
                    Buffer.write(BinFile.read())
                    BinFile.close()
                    Size = Size - FileLength
            #
            # Pad the remaining buffer
            #
            self.PadBuffer(Buffer, ErasePolarity, Size)

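        # A CAPSULE region either points at a pre-built .cap file or names a
        # capsule described in the FDF; in the latter case the capsule image is
        # generated on the fly before being copied into the FD buffer.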
        if self.RegionType == 'CAPSULE':
            #
            # Get Capsule from Capsule Dict
            #
            for RegionData in self.RegionDataList:
                if RegionData.endswith(".cap"):
                    RegionData = GenFdsGlobalVariable.MacroExtend(RegionData, MacroDict)
                    GenFdsGlobalVariable.InfLogger('   Region CAPSULE Image Name = .cap : %s' % RegionData)
                    if RegionData[1] != ':':
                        RegionData = mws.join(GenFdsGlobalVariable.WorkSpaceDir, RegionData)
                    if not os.path.exists(RegionData):
                        EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=RegionData)

                    FileName = RegionData
                elif RegionData.upper() + 'cap' in ImageBinDict.keys():
                    GenFdsGlobalVariable.InfLogger('   Region Name = CAPSULE')
                    FileName = ImageBinDict[RegionData.upper() + 'cap']
                else:
                    #
                    # Generate the Capsule image and put it into the FD buffer
                    #
                    CapsuleObj = None
                    if RegionData.upper() in GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict.keys():
                        CapsuleObj = GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict[RegionData.upper()]
                    if CapsuleObj is not None:
                        CapsuleObj.CapsuleName = RegionData.upper()
                        GenFdsGlobalVariable.InfLogger('   Region Name = CAPSULE')
                        #
                        # Call GenFv tool to generate Capsule Image
                        #
                        FileName = CapsuleObj.GenCapsule()
                        CapsuleObj.CapsuleName = None
                    else:
                        EdkLogger.error("GenFds", GENFDS_ERROR, "Capsule (%s) is NOT described in FDF file!" % (RegionData))
                #
                # Add the capsule image into the FD buffer
                #
                FileLength = os.stat(FileName)[ST_SIZE]
                if FileLength > Size:
                    EdkLogger.error("GenFds", GENFDS_ERROR,
                                    "Size 0x%X of Capsule File (%s) is larger than Region Size 0x%X specified." \
                                    % (FileLength, RegionData, Size))
                BinFile = open(FileName, 'rb')
                Buffer.write(BinFile.read())
                BinFile.close()
                Size = Size - FileLength
            #
            # Pad the remaining buffer
            #
            self.PadBuffer(Buffer, ErasePolarity, Size)

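        # For an INF region, each RegionData entry is the parsed INF object: it
        # must list exactly one binary, and that binary (with PCD patching
        # applied via PatchEfiFile) is placed into the region. For a FILE
        # region, RegionData is simply the path of a binary file.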
        if self.RegionType in ('FILE', 'INF'):
            for RegionData in self.RegionDataList:
                if self.RegionType == 'INF':
                    RegionData.__InfParse__(None)
                    if len(RegionData.BinFileList) != 1:
                        EdkLogger.error('GenFds', GENFDS_ERROR, 'INF in FD region can only contain one binary: %s' % RegionData)
                    File = RegionData.BinFileList[0]
                    RegionData = RegionData.PatchEfiFile(File.Path, File.Type)
                else:
                    RegionData = GenFdsGlobalVariable.MacroExtend(RegionData, MacroDict)
                    if RegionData[1] != ':':
                        RegionData = mws.join(GenFdsGlobalVariable.WorkSpaceDir, RegionData)
                    if not os.path.exists(RegionData):
                        EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=RegionData)
                #
                # Add the file image into the FD buffer
                #
                FileLength = os.stat(RegionData)[ST_SIZE]
                if FileLength > Size:
                    EdkLogger.error("GenFds", GENFDS_ERROR,
                                    "Size of File (%s) is larger than Region Size 0x%X specified." \
                                    % (RegionData, Size))
                GenFdsGlobalVariable.InfLogger('   Region File Name = %s' % RegionData)
                BinFile = open(RegionData, 'rb')
                Buffer.write(BinFile.read())
                BinFile.close()
                Size = Size - FileLength
            #
            # Pad the remaining buffer
            #
            self.PadBuffer(Buffer, ErasePolarity, Size)

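        # A DATA region carries its content directly in the FDF as a list of
        # comma-separated hex byte values; each value is packed as one byte into
        # the FD buffer.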
        if self.RegionType == 'DATA':
            GenFdsGlobalVariable.InfLogger('   Region Name = DATA')
            DataSize = 0
            for RegionData in self.RegionDataList:
                Data = RegionData.split(',')
                DataSize = DataSize + len(Data)
                if DataSize > Size:
                    EdkLogger.error("GenFds", GENFDS_ERROR, "Size of DATA is larger than Region Size")
                else:
                    for item in Data:
                        Buffer.write(pack('B', int(item, 16)))
            Size = Size - DataSize
            #
            # Pad the remaining buffer
            #
            self.PadBuffer(Buffer, ErasePolarity, Size)

        if self.RegionType is None:
            GenFdsGlobalVariable.InfLogger('   Region Name = None')
            self.PadBuffer(Buffer, ErasePolarity, Size)

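    ## GetFvAlignValue()
    #
    #   Convert an FV alignment string with an optional K/M/G suffix into a
    #   byte count; for example '8K' yields 8 * 1024 = 0x2000.
    #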
    def GetFvAlignValue(self, Str):
        AlignValue = 1
        Granu = 1
        Str = Str.strip().upper()
        if Str.endswith('K'):
            Granu = 1024
            Str = Str[:-1]
        elif Str.endswith('M'):
            Granu = 1024 * 1024
            Str = Str[:-1]
        elif Str.endswith('G'):
            Granu = 1024 * 1024 * 1024
            Str = Str[:-1]
        else:
            pass

        AlignValue = int(Str) * Granu
        return AlignValue

    ## BlockInfoOfRegion()
    #
    #   @param  BlockSizeList   List of block information
    #   @param  FvObj           The object for FV
    #
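    #   Walk the FD's block entries to determine which blocks this region spans,
    #   build the expected (BlockSize, NumBlocks) list for the FV placed here,
    #   and then fill in or validate the FV's own BlockSizeList against it.
    #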
    def BlockInfoOfRegion(self, BlockSizeList, FvObj):
        Start = 0
        End = 0
        RemindingSize = self.Size
        ExpectedList = []
        for (BlockSize, BlockNum, pcd) in BlockSizeList:
            End = Start + BlockSize * BlockNum
            # region not started yet
            if self.Offset >= End:
                Start = End
                continue
            # region located in current blocks
            else:
                # region ended within current blocks
                if self.Offset + self.Size <= End:
                    ExpectedList.append((BlockSize, (RemindingSize + BlockSize - 1) / BlockSize))
                    break
                # region not ended yet
                else:
                    # region not started in middle of current blocks
                    if self.Offset <= Start:
                        UsedBlockNum = BlockNum
                    # region started in middle of current blocks
                    else:
                        UsedBlockNum = (End - self.Offset) / BlockSize
                    Start = End
                    ExpectedList.append((BlockSize, UsedBlockNum))
                    RemindingSize -= BlockSize * UsedBlockNum

        if FvObj.BlockSizeList == []:
            FvObj.BlockSizeList = ExpectedList
        else:
            # first check whether FvObj.BlockSizeList items have only "BlockSize" or "NumBlocks";
            # if so, use ExpectedList
            for Item in FvObj.BlockSizeList:
                if Item[0] is None or Item[1] is None:
                    FvObj.BlockSizeList = ExpectedList
                    break
            # make sure the region size is no smaller than the summed block size of the FV
            Sum = 0
            for Item in FvObj.BlockSizeList:
                Sum += Item[0] * Item[1]
            if self.Size < Sum:
                EdkLogger.error("GenFds", GENFDS_ERROR, "Total Size of FV %s 0x%x is larger than Region Size 0x%x"
                                % (FvObj.UiFvName, Sum, self.Size))
            # check whether the BlockStatements in the FV section are appropriate
            ExpectedListData = ''
            for Item in ExpectedList:
                ExpectedListData += "BlockSize = 0x%x\n\tNumBlocks = 0x%x\n\t" % Item
            Index = 0
            for Item in FvObj.BlockSizeList:
                if Item[0] != ExpectedList[Index][0]:
                    EdkLogger.error("GenFds", GENFDS_ERROR, "BlockStatements of FV %s do not align with the FD's; suggested FV BlockStatements:"
                                    % FvObj.UiFvName, ExtraData=ExpectedListData)
                elif Item[1] != ExpectedList[Index][1]:
                    if (Item[1] < ExpectedList[Index][1]) and (Index == len(FvObj.BlockSizeList) - 1):
                        break
                    else:
                        EdkLogger.error("GenFds", GENFDS_ERROR, "BlockStatements of FV %s do not align with the FD's; suggested FV BlockStatements:"
                                        % FvObj.UiFvName, ExtraData=ExpectedListData)
                else:
                    Index += 1