[PATCH 00/25] binman: Support compression of sections

This series includes a number of improvements and refactors to support compressing entire sections. This is sometimes useful when a section contains a number of entries which are accessed as a whole and are best compressed together.
Most of the effort here is clarifying what is in a section and what is added by its parent (i.e. padding). But the opportunity is taken to tidy up and somewhat simplify some of the section-handling code. Also some new tests are added to reduce the amount of undefined behaviour.
This series is available at u-boot-dm/binman-working
Simon Glass (25):
  binman: Give a sensible error if no command is given
  binman: Fix return from u-boot-ucode if there is no DT
  binman: Remove references to 'image' in entry_Section
  binman: Expand the error message for breaching a section
  binman: Move CompressData() into Entry base class
  binman: Use 'files-compress' to set compression for files
  binman: Update testPackExtra with more checks
  binman: Expand docs and test for padding
  binman: Expand docs and test for alignment
  binman: Move section-building code into a function
  binman: Refactor _BuildSectionData()
  binman: Move section padding to the parent
  binman: Make section padding consistent with other entries
  binman: Store the original data before compression
  binman: Set section contents in GetData()
  binman: Avoid reporting image-pos with compression
  binman: Drop Entry.CheckOffset()
  binman: Move sort and expand to the main Pack() function
  binman: Drop the Entry.CheckSize() method
  binman: Call CheckSize() from the section's Pack() method
  binman: Drop CheckEntries()
  binman: Update CheckEntries() for compressed sections
  binman: Use the actual contents in CheckSize()
  binman: Support compression of sections
  binman: Avoid calculated section data repeatedly
 tools/binman/README                           |  72 ++--
 tools/binman/README.entries                   |  15 +-
 tools/binman/cmdline.py                       |   1 +
 tools/binman/control.py                       |   4 +-
 tools/binman/entry.py                         |  65 +++-
 tools/binman/etype/blob.py                    |   7 -
 tools/binman/etype/cbfs.py                    |   6 +-
 tools/binman/etype/files.py                   |   7 +-
 tools/binman/etype/section.py                 | 167 +++++++--
 tools/binman/etype/u_boot_ucode.py            |   1 +
 tools/binman/ftest.py                         | 347 +++++++++++++++++-
 tools/binman/image.py                         |   2 +-
 tools/binman/test/009_pack_extra.dts          |   2 +-
 tools/binman/test/085_files_compress.dts      |   2 +-
 tools/binman/test/177_section_pad.dts         |  27 ++
 tools/binman/test/178_section_align.dts       |  34 ++
 tools/binman/test/179_compress_image.dts      |  14 +
 tools/binman/test/180_compress_image_less.dts |  14 +
 .../binman/test/181_compress_section_size.dts |  17 +
 tools/binman/test/182_compress_section.dts    |  16 +
 tools/binman/test/183_compress_extra.dts      |  37 ++
 21 files changed, 753 insertions(+), 104 deletions(-)
 create mode 100644 tools/binman/test/177_section_pad.dts
 create mode 100644 tools/binman/test/178_section_align.dts
 create mode 100644 tools/binman/test/179_compress_image.dts
 create mode 100644 tools/binman/test/180_compress_image_less.dts
 create mode 100644 tools/binman/test/181_compress_section_size.dts
 create mode 100644 tools/binman/test/182_compress_section.dts
 create mode 100644 tools/binman/test/183_compress_extra.dts

At present if 'binman' is typed on the command line, a strange error about a missing argument is displayed. Fix this.
There does not seem to be a standard way to add the 'required' argument that works in all recent Python versions, so set it manually.
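For reference, here is the same trick in a standalone argparse sketch (plain Python, not binman code):

    import argparse

    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='cmd')
    # In some Python 3 versions the subcommand is optional by default, so
    # force an error when no command is given at all
    subparsers.required = True
    subparsers.add_parser('build')

    # parser.parse_args([]) now reports that the 'cmd' argument is required
    # instead of failing later with a confusing message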
Signed-off-by: Simon Glass <sjg@chromium.org>
---
 tools/binman/cmdline.py | 1 +
 1 file changed, 1 insertion(+)
diff --git a/tools/binman/cmdline.py b/tools/binman/cmdline.py
index bb4d9d1288b..c007d0a036d 100644
--- a/tools/binman/cmdline.py
+++ b/tools/binman/cmdline.py
@@ -37,6 +37,7 @@ controlled by a description in the board device tree.'''
                         '3=info, 4=detail, 5=debug')
 
     subparsers = parser.add_subparsers(dest='cmd')
+    subparsers.required = True
 
     build_parser = subparsers.add_parser('build', help='Build firmware image')
     build_parser.add_argument('-a', '--entry-arg', type=str, action='append',

When there is no device tree to process, this entry should return empty contents rather than leaving them unset. Fix it.
Signed-off-by: Simon Glass <sjg@chromium.org>
---
 tools/binman/etype/u_boot_ucode.py | 1 +
 1 file changed, 1 insertion(+)
diff --git a/tools/binman/etype/u_boot_ucode.py b/tools/binman/etype/u_boot_ucode.py
index 44622936182..b4cb8cdb6e1 100644
--- a/tools/binman/etype/u_boot_ucode.py
+++ b/tools/binman/etype/u_boot_ucode.py
@@ -81,6 +81,7 @@ class Entry_u_boot_ucode(Entry_blob):
             if fdt_entry:
                 break
         if not fdt_entry:
+            self.data = b''
             return True
         if not fdt_entry.ready:
             return False

While a section is the base class of Image, it is more correct to refer to sections in most places in this file. Fix these comments.
Signed-off-by: Simon Glass <sjg@chromium.org>
---
 tools/binman/etype/section.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index 515c97f9290..327750461eb 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -56,7 +56,7 @@ class Entry_section(Entry): self._end_4gb = False
def ReadNode(self): - """Read properties from the image node""" + """Read properties from the section node""" super().ReadNode() self._pad_byte = fdt_util.GetInt(self._node, 'pad-byte', 0) self._sort = fdt_util.GetBool(self._node, 'sort-by-offset') @@ -183,7 +183,7 @@ class Entry_section(Entry): return super().Pack(offset)
def _PackEntries(self): - """Pack all entries into the image""" + """Pack all entries into the section""" offset = self._skip_at_start for entry in self._entries.values(): offset = entry.Pack(offset) @@ -209,7 +209,7 @@ class Entry_section(Entry): self._entries[entry._node.name] = entry
def CheckEntries(self): - """Check that entries do not overlap or extend outside the image""" + """Check that entries do not overlap or extend outside the section""" if self._sort: self._SortEntries() self._ExpandEntries() @@ -456,7 +456,7 @@ class Entry_section(Entry):
def CheckSize(self): - """Check that the image contents does not exceed its size, etc.""" + """Check that the section contents does not exceed its size, etc.""" contents_size = 0 for entry in self._entries.values(): contents_size = max(contents_size, entry.offset + entry.size)

Add in a few more details to this error message to make it easier to see what is going on.
Signed-off-by: Simon Glass <sjg@chromium.org>
---
 tools/binman/etype/section.py | 10 ++++++----
 tools/binman/ftest.py | 5 +++--
 2 files changed, 9 insertions(+), 6 deletions(-)
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index 327750461eb..a3e37c33c1b 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -220,10 +220,12 @@ class Entry_section(Entry): if (entry.offset < self._skip_at_start or entry.offset + entry.size > self._skip_at_start + self.size): - entry.Raise("Offset %#x (%d) is outside the section starting " - "at %#x (%d)" % - (entry.offset, entry.offset, self._skip_at_start, - self._skip_at_start)) + entry.Raise('Offset %#x (%d) size %#x (%d) is outside the ' + "section '%s' starting at %#x (%d) " + 'of size %#x (%d)' % + (entry.offset, entry.offset, entry.size, entry.size, + self._node.path, self._skip_at_start, + self._skip_at_start, self.size, self.size)) if entry.offset < offset and entry.size: entry.Raise("Offset %#x (%d) overlaps with previous entry '%s' " "ending at %#x (%d)" % diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py index b771b9d5df7..8e6222ad785 100644 --- a/tools/binman/ftest.py +++ b/tools/binman/ftest.py @@ -968,8 +968,9 @@ class TestFunctional(unittest.TestCase): """Test that the end-at-4gb property checks for offset boundaries""" with self.assertRaises(ValueError) as e: self._DoTestFile('028_pack_4gb_outside.dts') - self.assertIn("Node '/binman/u-boot': Offset 0x0 (0) is outside " - "the section starting at 0xffffffe0 (4294967264)", + self.assertIn("Node '/binman/u-boot': Offset 0x0 (0) size 0x4 (4) " + "is outside the section '/binman' starting at " + '0xffffffe0 (4294967264) of size 0x20 (32)', str(e.exception))
def testPackX86Rom(self):

At present this is only used by blobs. To allow it to be used by other entry types (such as sections), move it into the base class.
Also read the compression type in the base class.
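With that in place, any entry type can compress its contents in ObtainContents(). A minimal sketch (Entry_example is purely illustrative, not a real entry type):

    from binman.entry import Entry

    class Entry_example(Entry):
        """Hypothetical entry type, shown only to illustrate the call"""
        def ObtainContents(self):
            indata = b'example contents'
            # CompressData() honours the node's 'compress' property: with
            # 'none' the data comes back unchanged, otherwise uncomp_size
            # is recorded and the compressed bytes are returned
            self.SetContents(self.CompressData(indata))
            return True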
Signed-off-by: Simon Glass <sjg@chromium.org>
---
 tools/binman/entry.py | 17 +++++++++++++++++
 tools/binman/etype/blob.py | 7 -------
 2 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/tools/binman/entry.py b/tools/binman/entry.py index f7adc3b1abb..173c9131cbb 100644 --- a/tools/binman/entry.py +++ b/tools/binman/entry.py @@ -180,6 +180,9 @@ class Entry(object): self.expand_size = fdt_util.GetBool(self._node, 'expand-size') self.missing_msg = fdt_util.GetString(self._node, 'missing-msg')
+ # This is only supported by blobs and sections at present + self.compress = fdt_util.GetString(self._node, 'compress', 'none') + def GetDefaultFilename(self): return None
@@ -836,3 +839,17 @@ features to produce new behaviours. list of possible tags, most desirable first """ return list(filter(None, [self.missing_msg, self.name, self.etype])) + + def CompressData(self, indata): + """Compress data according to the entry's compression method + + Args: + indata: Data to compress + + Returns: + Compressed data (first word is the compressed size) + """ + if self.compress != 'none': + self.uncomp_size = len(indata) + data = tools.Compress(indata, self.compress) + return data diff --git a/tools/binman/etype/blob.py b/tools/binman/etype/blob.py index ecfb1e476e8..301ac55e3b2 100644 --- a/tools/binman/etype/blob.py +++ b/tools/binman/etype/blob.py @@ -33,7 +33,6 @@ class Entry_blob(Entry): def __init__(self, section, etype, node): super().__init__(section, etype, node) self._filename = fdt_util.GetString(self._node, 'filename', self.etype) - self.compress = fdt_util.GetString(self._node, 'compress', 'none')
def ObtainContents(self): self._filename = self.GetDefaultFilename() @@ -48,12 +47,6 @@ class Entry_blob(Entry): self.ReadBlobContents() return True
- def CompressData(self, indata): - if self.compress != 'none': - self.uncomp_size = len(indata) - data = tools.Compress(indata, self.compress) - return data - def ReadBlobContents(self): """Read blob contents into memory

At present we use 'compress' as the property to set the compression of a 'files' entry. But this conflicts with the 'compress' property of the Entry base class, of which Entry_section (and hence Entry_files) is a subclass.
Strictly speaking, since Entry_files is in fact a subclass of Entry_section, the files can be compressed individually but also the section (that contains all the files) can itself be compressed. With this change, it is possible to express that.
Signed-off-by: Simon Glass <sjg@chromium.org>
---
 tools/binman/README.entries | 15 ++++++++++++++-
 tools/binman/etype/files.py | 7 ++++---
 tools/binman/etype/section.py | 4 ++--
 tools/binman/test/085_files_compress.dts | 2 +-
 4 files changed, 21 insertions(+), 7 deletions(-)
diff --git a/tools/binman/README.entries b/tools/binman/README.entries index c1d436563e8..a3a314753c5 100644 --- a/tools/binman/README.entries +++ b/tools/binman/README.entries @@ -299,7 +299,7 @@ Entry: files: Entry containing a set of files
Properties / Entry arguments: - pattern: Filename pattern to match the files to include - - compress: Compression algorithm to use: + - files-compress: Compression algorithm to use: none: No compression lz4: Use lz4 compression (via 'lz4' command-line utility)
@@ -406,6 +406,10 @@ The 'default' property, if present, will be automatically set to the name if of configuration whose devicetree matches the 'default-dt' entry argument, e.g. with '-a default-dt=sun50i-a64-pine64-lts'.
+Available substitutions for '@' property values are: + + DEFAULT-SEQ Sequence number of the default fdt,as provided by the + 'default-dt' entry argument
Properties (in the 'fit' node itself): fit,external-offset: Indicates that the contents of the FIT are external @@ -878,6 +882,15 @@ relocated to any address for execution.
+Entry: u-boot-env: An entry which contains a U-Boot environment +--------------------------------------------------------------- + +Properties / Entry arguments: + - filename: File containing the environment text, with each line in the + form var=value + + + Entry: u-boot-img: U-Boot legacy image --------------------------------------
diff --git a/tools/binman/etype/files.py b/tools/binman/etype/files.py index 9adb3afeb14..ce3832e3cdd 100644 --- a/tools/binman/etype/files.py +++ b/tools/binman/etype/files.py @@ -19,7 +19,7 @@ class Entry_files(Entry_section):
Properties / Entry arguments: - pattern: Filename pattern to match the files to include - - compress: Compression algorithm to use: + - files-compress: Compression algorithm to use: none: No compression lz4: Use lz4 compression (via 'lz4' command-line utility)
@@ -36,7 +36,8 @@ class Entry_files(Entry_section): self._pattern = fdt_util.GetString(self._node, 'pattern') if not self._pattern: self.Raise("Missing 'pattern' property") - self._compress = fdt_util.GetString(self._node, 'compress', 'none') + self._files_compress = fdt_util.GetString(self._node, 'files-compress', + 'none') self._require_matches = fdt_util.GetBool(self._node, 'require-matches')
@@ -53,7 +54,7 @@ class Entry_files(Entry_section): subnode = state.AddSubnode(self._node, name) state.AddString(subnode, 'type', 'blob') state.AddString(subnode, 'filename', fname) - state.AddString(subnode, 'compress', self._compress) + state.AddString(subnode, 'compress', self._files_compress)
# Read entries again, now that we have some self._ReadEntries() diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index a3e37c33c1b..9222042f5d8 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -160,7 +160,7 @@ class Entry_section(Entry): section_data += tools.GetBytes(self._pad_byte, pad) self.Detail('GetData: %d entries, total size %#x' % (len(self._entries), len(section_data))) - return section_data + return self.CompressData(section_data)
def GetOffsets(self): """Handle entries that want to set the offset/size of other entries @@ -414,7 +414,7 @@ class Entry_section(Entry): return None
def GetEntryContents(self): - """Call ObtainContents() for the section + """Call ObtainContents() for each entry in the section """ todo = self._entries.values() for passnum in range(3): diff --git a/tools/binman/test/085_files_compress.dts b/tools/binman/test/085_files_compress.dts index 847b398bf2b..5aeead2e6e9 100644 --- a/tools/binman/test/085_files_compress.dts +++ b/tools/binman/test/085_files_compress.dts @@ -5,7 +5,7 @@ binman { files { pattern = "files/*.dat"; - compress = "lz4"; + files-compress = "lz4"; }; }; };

On 19/10/2020 05:41, Simon Glass wrote:
At present we use 'compress' as the property to set the compression of a 'files' entry. But this conflicts with the same property for entries, of which Entry_section is a subclass.
Strictly speaking, since Entry_files is in fact a subclass of Entry_section, the files can be compressed individually but also the section (that contains all the files) can itself be compressed. With this change, it is possible to express that.
Signed-off-by: Simon Glass sjg@chromium.org
tools/binman/README.entries | 15 ++++++++++++++- tools/binman/etype/files.py | 7 ++++--- tools/binman/etype/section.py | 4 ++-- tools/binman/test/085_files_compress.dts | 2 +- 4 files changed, 21 insertions(+), 7 deletions(-)
diff --git a/tools/binman/README.entries b/tools/binman/README.entries index c1d436563e8..a3a314753c5 100644 --- a/tools/binman/README.entries +++ b/tools/binman/README.entries @@ -299,7 +299,7 @@ Entry: files: Entry containing a set of files
Properties / Entry arguments: - pattern: Filename pattern to match the files to include
- compress: Compression algorithm to use:
- files-compress: Compression algorithm to use: none: No compression lz4: Use lz4 compression (via 'lz4' command-line utility)
@@ -406,6 +406,10 @@ The 'default' property, if present, will be automatically set to the name if of configuration whose devicetree matches the 'default-dt' entry argument, e.g. with '-a default-dt=sun50i-a64-pine64-lts'.
+Available substitutions for '@' property values are:
- DEFAULT-SEQ Sequence number of the default fdt,as provided by the
'default-dt' entry argument
I think this and the next hunk slipped in because you're regenerating a stale file here; I wanted to point it out in case you'd prefer them to be in an independent commit.
Properties (in the 'fit' node itself): fit,external-offset: Indicates that the contents of the FIT are external @@ -878,6 +882,15 @@ relocated to any address for execution.
+Entry: u-boot-env: An entry which contains a U-Boot environment +---------------------------------------------------------------
+Properties / Entry arguments:
- filename: File containing the environment text, with each line in the
form var=value
Same as above.
Entry: u-boot-img: U-Boot legacy image
diff --git a/tools/binman/etype/files.py b/tools/binman/etype/files.py index 9adb3afeb14..ce3832e3cdd 100644 --- a/tools/binman/etype/files.py +++ b/tools/binman/etype/files.py @@ -19,7 +19,7 @@ class Entry_files(Entry_section):
Properties / Entry arguments: - pattern: Filename pattern to match the files to include
- compress: Compression algorithm to use:
- files-compress: Compression algorithm to use: none: No compression lz4: Use lz4 compression (via 'lz4' command-line utility)
@@ -36,7 +36,8 @@ class Entry_files(Entry_section): self._pattern = fdt_util.GetString(self._node, 'pattern') if not self._pattern: self.Raise("Missing 'pattern' property")
self._compress = fdt_util.GetString(self._node, 'compress', 'none')
self._files_compress = fdt_util.GetString(self._node, 'files-compress',
'none') self._require_matches = fdt_util.GetBool(self._node, 'require-matches')
@@ -53,7 +54,7 @@ class Entry_files(Entry_section): subnode = state.AddSubnode(self._node, name) state.AddString(subnode, 'type', 'blob') state.AddString(subnode, 'filename', fname)
state.AddString(subnode, 'compress', self._compress)
state.AddString(subnode, 'compress', self._files_compress) # Read entries again, now that we have some self._ReadEntries()
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index a3e37c33c1b..9222042f5d8 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -160,7 +160,7 @@ class Entry_section(Entry): section_data += tools.GetBytes(self._pad_byte, pad) self.Detail('GetData: %d entries, total size %#x' % (len(self._entries), len(section_data)))
return section_data
return self.CompressData(section_data)
def GetOffsets(self): """Handle entries that want to set the offset/size of other entries
@@ -414,7 +414,7 @@ class Entry_section(Entry): return None
def GetEntryContents(self):
"""Call ObtainContents() for the section
"""Call ObtainContents() for each entry in the section """ todo = self._entries.values() for passnum in range(3):
diff --git a/tools/binman/test/085_files_compress.dts b/tools/binman/test/085_files_compress.dts index 847b398bf2b..5aeead2e6e9 100644 --- a/tools/binman/test/085_files_compress.dts +++ b/tools/binman/test/085_files_compress.dts @@ -5,7 +5,7 @@ binman { files { pattern = "files/*.dat";
-			compress = "lz4";
+			files-compress = "lz4";
		};
	};
};

Check the contents of each entry to make sure it is actually in the right place.
Also fix a whitespace error in the .dts file.
Signed-off-by: Simon Glass <sjg@chromium.org>
---
 tools/binman/ftest.py | 27 ++++++++++++++++++++++-----
 tools/binman/test/009_pack_extra.dts | 2 +-
 2 files changed, 23 insertions(+), 6 deletions(-)
diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py index 8e6222ad785..643663d44ec 100644 --- a/tools/binman/ftest.py +++ b/tools/binman/ftest.py @@ -783,9 +783,8 @@ class TestFunctional(unittest.TestCase):
def testPackExtra(self): """Test that extra packing feature works as expected""" - retcode = self._DoTestFile('009_pack_extra.dts') + data = self._DoReadFile('009_pack_extra.dts')
- self.assertEqual(0, retcode) self.assertIn('image', control.images) image = control.images['image'] entries = image.GetEntries() @@ -797,30 +796,48 @@ class TestFunctional(unittest.TestCase): self.assertEqual(0, entry.offset) self.assertEqual(3, entry.pad_before) self.assertEqual(3 + 5 + len(U_BOOT_DATA), entry.size) + self.assertEqual(U_BOOT_DATA, entry.data) + self.assertEqual(tools.GetBytes(0, 3) + U_BOOT_DATA + + tools.GetBytes(0, 5), data[:entry.size]) + pos = entry.size
# Second u-boot has an aligned size, but it has no effect self.assertIn('u-boot-align-size-nop', entries) entry = entries['u-boot-align-size-nop'] - self.assertEqual(12, entry.offset) - self.assertEqual(4, entry.size) + self.assertEqual(pos, entry.offset) + self.assertEqual(len(U_BOOT_DATA), entry.size) + self.assertEqual(U_BOOT_DATA, entry.data) + self.assertEqual(U_BOOT_DATA, data[pos:pos + entry.size]) + pos += entry.size
# Third u-boot has an aligned size too self.assertIn('u-boot-align-size', entries) entry = entries['u-boot-align-size'] - self.assertEqual(16, entry.offset) + self.assertEqual(pos, entry.offset) self.assertEqual(32, entry.size) + self.assertEqual(U_BOOT_DATA, entry.data) + self.assertEqual(U_BOOT_DATA + tools.GetBytes(0, 32 - len(U_BOOT_DATA)), + data[pos:pos + entry.size]) + pos += entry.size
# Fourth u-boot has an aligned end self.assertIn('u-boot-align-end', entries) entry = entries['u-boot-align-end'] self.assertEqual(48, entry.offset) self.assertEqual(16, entry.size) + self.assertEqual(U_BOOT_DATA, entry.data[:len(U_BOOT_DATA)]) + self.assertEqual(U_BOOT_DATA + tools.GetBytes(0, 16 - len(U_BOOT_DATA)), + data[pos:pos + entry.size]) + pos += entry.size
# Fifth u-boot immediately afterwards self.assertIn('u-boot-align-both', entries) entry = entries['u-boot-align-both'] self.assertEqual(64, entry.offset) self.assertEqual(64, entry.size) + self.assertEqual(U_BOOT_DATA, entry.data[:len(U_BOOT_DATA)]) + self.assertEqual(U_BOOT_DATA + tools.GetBytes(0, 64 - len(U_BOOT_DATA)), + data[pos:pos + entry.size])
self.CheckNoGaps(entries) self.assertEqual(128, image.size) diff --git a/tools/binman/test/009_pack_extra.dts b/tools/binman/test/009_pack_extra.dts index 0765707dea2..1b315557716 100644 --- a/tools/binman/test/009_pack_extra.dts +++ b/tools/binman/test/009_pack_extra.dts @@ -28,7 +28,7 @@
u-boot-align-both { type = "u-boot"; - align= <64>; + align = <64>; align-end = <128>; }; };

Padding becomes part of the entry once the image is written out, but within binman the entry contents do not include the padding. Add documentation to make this clear, as well as a test.
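To spell out the rule being documented, a standalone sketch (plain Python, not binman code; the values are arbitrary):

    pad_before, pad_after = 3, 5
    entry_data = b'U-Boot'               # what entry.GetData() returns

    # The padding is added by the containing section when it assembles its
    # contents, so it never appears in entry.data itself
    placed = bytes(pad_before) + entry_data + bytes(pad_after)

    assert len(entry_data) == 6          # unchanged by the padding
    assert len(placed) == 3 + 6 + 5      # what actually lands in the image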
Signed-off-by: Simon Glass <sjg@chromium.org>
---
 tools/binman/README | 12 +++++++++---
 tools/binman/entry.py | 11 ++++++++---
 tools/binman/ftest.py | 29 ++++++++++++++++++++++++++++-
 3 files changed, 45 insertions(+), 7 deletions(-)
diff --git a/tools/binman/README b/tools/binman/README index fbcfdc77c3e..0433cabce4f 100644 --- a/tools/binman/README +++ b/tools/binman/README @@ -290,14 +290,20 @@ size:
pad-before: Padding before the contents of the entry. Normally this is 0, meaning - that the contents start at the beginning of the entry. This can be - offset the entry contents a little. Defaults to 0. + that the contents start at the beginning of the entry. This can be used + to offset the entry contents a little. While this does not affect the + contents of the entry within binman itself (the padding is performed + only when its parent section is assembled), the end result will be that + the entry starts with the padding bytes, so may grow. Defaults to 0.
pad-after: Padding after the contents of the entry. Normally this is 0, meaning that the entry ends at the last byte of content (unless adjusted by other properties). This allows room to be created in the image for - this entry to expand later. Defaults to 0. + this entry to expand later. While this does not affect the contents of + the entry within binman itself (the padding is performed only when its + parent section is assembled), the end result will be that the entry ends + with the padding bytes, so may grow. Defaults to 0.
align-size: This sets the alignment of the entry size. For example, to ensure diff --git a/tools/binman/entry.py b/tools/binman/entry.py index 173c9131cbb..e5d0aa52bd6 100644 --- a/tools/binman/entry.py +++ b/tools/binman/entry.py @@ -51,9 +51,14 @@ class Entry(object): align: Entry start offset alignment, or None align_size: Entry size alignment, or None align_end: Entry end offset alignment, or None - pad_before: Number of pad bytes before the contents, 0 if none - pad_after: Number of pad bytes after the contents, 0 if none - data: Contents of entry (string of bytes) + pad_before: Number of pad bytes before the contents when it is placed + in the containing section, 0 if none. The pad bytes become part of + the entry. + pad_after: Number of pad bytes after the contents when it is placed in + the containing section, 0 if none. The pad bytes become part of + the entry. + data: Contents of entry (string of bytes). This does not include + padding created by pad_before or pad_after compress: Compression algoithm used (e.g. 'lz4'), 'none' if none orig_offset: Original offset value read from node orig_size: Original size value read from node diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py index 643663d44ec..74ee7606cca 100644 --- a/tools/binman/ftest.py +++ b/tools/binman/ftest.py @@ -3546,12 +3546,39 @@ class TestFunctional(unittest.TestCase):
def testPadInSections(self): """Test pad-before, pad-after for entries in sections""" - data = self._DoReadFile('166_pad_in_sections.dts') + data, _, _, out_dtb_fname = self._DoReadFileDtb( + '166_pad_in_sections.dts', update_dtb=True) expected = (U_BOOT_DATA + tools.GetBytes(ord('!'), 12) + U_BOOT_DATA + tools.GetBytes(ord('!'), 6) + U_BOOT_DATA) self.assertEqual(expected, data)
+ dtb = fdt.Fdt(out_dtb_fname) + dtb.Scan() + props = self._GetPropTree(dtb, ['size', 'image-pos', 'offset']) + expected = { + 'image-pos': 0, + 'offset': 0, + 'size': 12 + 6 + 3 * len(U_BOOT_DATA), + + 'section:image-pos': 0, + 'section:offset': 0, + 'section:size': 12 + 6 + 3 * len(U_BOOT_DATA), + + 'section/before:image-pos': 0, + 'section/before:offset': 0, + 'section/before:size': len(U_BOOT_DATA), + + 'section/u-boot:image-pos': 4, + 'section/u-boot:offset': 4, + 'section/u-boot:size': 12 + len(U_BOOT_DATA) + 6, + + 'section/after:image-pos': 26, + 'section/after:offset': 26, + 'section/after:size': len(U_BOOT_DATA), + } + self.assertEqual(expected, props) + def testFitImageSubentryAlignment(self): """Test relative alignability of FIT image subentries""" entry_args = {

Padding added for alignment becomes part of the entry once the image is written out, but within binman the entry contents do not include that padding. Add documentation to make this clear, as well as a test.
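A similar standalone sketch for the alignment properties (plain Python, arbitrary values, zero pad byte assumed):

    def align_up(value, align):
        # Round 'value' up to the next multiple of 'align' (a power of 2)
        return (value + align - 1) & ~(align - 1)

    # 'align = <16>': the entry start moves up to a 16-byte boundary; the
    # gap before it belongs to the containing section, not to the entry
    assert align_up(5, 16) == 16

    # 'align-size = <8>': the entry size is rounded up; the extra bytes
    # appear after the contents when the section is assembled
    data = b'U-Boot'
    size = align_up(len(data), 8)
    placed = data + bytes(size - len(data))
    assert (len(data), size, len(placed)) == (6, 8, 8)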
Signed-off-by: Simon Glass <sjg@chromium.org>
---
 tools/binman/README | 29 ++++++++++++++++++++---------
 tools/binman/entry.py | 6 ++++--
 tools/binman/ftest.py | 33 ++++++++++++++++++++++++++++++++-
 3 files changed, 56 insertions(+), 12 deletions(-)
diff --git a/tools/binman/README b/tools/binman/README index 0433cabce4f..0dee71d1b22 100644 --- a/tools/binman/README +++ b/tools/binman/README @@ -278,9 +278,12 @@ offset:
align: This sets the alignment of the entry. The entry offset is adjusted - so that the entry starts on an aligned boundary within the image. For - example 'align = <16>' means that the entry will start on a 16-byte - boundary. Alignment shold be a power of 2. If 'align' is not + so that the entry starts on an aligned boundary within the containing + section or image. For example 'align = <16>' means that the entry will + start on a 16-byte boundary. This may mean that padding is added before + the entry. The padding is part of the containing section but is not + included in the entry, meaning that an empty space may be created before + the entry starts. Alignment should be a power of 2. If 'align' is not provided, no alignment is performed.
size: @@ -308,14 +311,22 @@ pad-after: align-size: This sets the alignment of the entry size. For example, to ensure that the size of an entry is a multiple of 64 bytes, set this to 64. - If 'align-size' is not provided, no alignment is performed. + While this does not affect the contents of the entry within binman + itself (the padding is performed only when its parent section is + assembled), the end result is that the entry ends with the padding + bytes, so may grow. If 'align-size' is not provided, no alignment is + performed.
align-end: - This sets the alignment of the end of an entry. Some entries require - that they end on an alignment boundary, regardless of where they - start. This does not move the start of the entry, so the contents of - the entry will still start at the beginning. But there may be padding - at the end. If 'align-end' is not provided, no alignment is performed. + This sets the alignment of the end of an entry with respect to the + containing section. Some entries require that they end on an alignment + boundary, regardless of where they start. This does not move the start + of the entry, so the contents of the entry will still start at the + beginning. But there may be padding at the end. While this does not + affect the contents of the entry within binman itself (the padding is + performed only when its parent section is assembled), the end result + is that the entry ends with the padding bytes, so may grow. + If 'align-end' is not provided, no alignment is performed.
filename: For 'blob' types this provides the filename containing the binary to diff --git a/tools/binman/entry.py b/tools/binman/entry.py index e5d0aa52bd6..0421129c031 100644 --- a/tools/binman/entry.py +++ b/tools/binman/entry.py @@ -48,9 +48,11 @@ class Entry(object): uncomp_size: Size of uncompressed data in bytes, if the entry is compressed, else None contents_size: Size of contents in bytes, 0 by default - align: Entry start offset alignment, or None + align: Entry start offset alignment relative to the start of the + containing section, or None align_size: Entry size alignment, or None - align_end: Entry end offset alignment, or None + align_end: Entry end offset alignment relative to the start of the + containing section, or None pad_before: Number of pad bytes before the contents when it is placed in the containing section, 0 if none. The pad bytes become part of the entry. diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py index 74ee7606cca..e265941a392 100644 --- a/tools/binman/ftest.py +++ b/tools/binman/ftest.py @@ -783,7 +783,8 @@ class TestFunctional(unittest.TestCase):
def testPackExtra(self): """Test that extra packing feature works as expected""" - data = self._DoReadFile('009_pack_extra.dts') + data, _, _, out_dtb_fname = self._DoReadFileDtb('009_pack_extra.dts', + update_dtb=True)
self.assertIn('image', control.images) image = control.images['image'] @@ -842,6 +843,36 @@ class TestFunctional(unittest.TestCase): self.CheckNoGaps(entries) self.assertEqual(128, image.size)
+ dtb = fdt.Fdt(out_dtb_fname) + dtb.Scan() + props = self._GetPropTree(dtb, ['size', 'offset', 'image-pos']) + expected = { + 'image-pos': 0, + 'offset': 0, + 'size': 128, + + 'u-boot:image-pos': 0, + 'u-boot:offset': 0, + 'u-boot:size': 3 + 5 + len(U_BOOT_DATA), + + 'u-boot-align-size-nop:image-pos': 12, + 'u-boot-align-size-nop:offset': 12, + 'u-boot-align-size-nop:size': 4, + + 'u-boot-align-size:image-pos': 16, + 'u-boot-align-size:offset': 16, + 'u-boot-align-size:size': 32, + + 'u-boot-align-end:image-pos': 48, + 'u-boot-align-end:offset': 48, + 'u-boot-align-end:size': 16, + + 'u-boot-align-both:image-pos': 64, + 'u-boot-align-both:offset': 64, + 'u-boot-align-both:size': 64, + } + self.assertEqual(expected, props) + def testPackAlignPowerOf2(self): """Test that invalid entry alignment is detected""" with self.assertRaises(ValueError) as e:

Create a new _BuildSectionData() to hold the code that is now in GetData(), so that it is clearly separated from the Entry.GetData() base function.
Separate out the 'pad-before' processing to make this easier to understand.
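In outline the per-entry handling now looks like this (a simplified sketch of the new logic, with a made-up entry tuple standing in for real Entry objects):

    from collections import namedtuple

    Ent = namedtuple('Ent', 'offset pad_before pad_after data')
    entries = [Ent(offset=4, pad_before=2, pad_after=1, data=b'AAAA')]
    skip_at_start, pad_byte = 0, 0x26

    section_data = b''
    for entry in entries:
        # Empty space up to the (already packed) entry offset
        pad = (entry.offset or 0) - skip_at_start - len(section_data)
        if pad > 0:
            section_data += bytes([pad_byte]) * pad
        # Explicit pad-before, then the contents, then explicit pad-after
        if entry.pad_before:
            section_data += bytes([pad_byte]) * entry.pad_before
        section_data += entry.data
        if entry.pad_after:
            section_data += bytes([pad_byte]) * entry.pad_after

    assert len(section_data) == 4 + 2 + 4 + 1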
Unfortunately this breaks the testDual test. Rather than squash several patches into an un-reviewable glob, disable the test for now.
Signed-off-by: Simon Glass <sjg@chromium.org>
---
 tools/binman/etype/section.py | 35 +++++++++++++++++++++++++++++------
 tools/binman/ftest.py | 3 ++-
 2 files changed, 31 insertions(+), 7 deletions(-)
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index 9222042f5d8..d05adf00274 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -144,24 +144,47 @@ class Entry_section(Entry): def ObtainContents(self): return self.GetEntryContents()
- def GetData(self): + def _BuildSectionData(self): + """Build the contents of a section + + This places all entries at the right place, dealing with padding before + and after entries. It does not do padding for the section itself (the + pad-before and pad-after properties in the section items) since that is + handled by the parent section. + + Returns: + Contents of the section (bytes) + """ section_data = b''
for entry in self._entries.values(): data = entry.GetData() - base = self.pad_before + (entry.offset or 0) - self._skip_at_start - pad = base - len(section_data) + (entry.pad_before or 0) + # Handle empty space before the entry + pad = (entry.offset or 0) - self._skip_at_start - len(section_data) if pad > 0: section_data += tools.GetBytes(self._pad_byte, pad) + + # Handle padding before the entry + if entry.pad_before: + section_data += tools.GetBytes(self._pad_byte, entry.pad_before) + + # Add in the actual entry data section_data += data + + # Handle padding after the entry + if entry.pad_after: + section_data += tools.GetBytes(self._pad_byte, entry.pad_after) + if self.size: - pad = self.size - len(section_data) - if pad > 0: - section_data += tools.GetBytes(self._pad_byte, pad) + section_data += tools.GetBytes(self._pad_byte, + self.size - len(section_data)) self.Detail('GetData: %d entries, total size %#x' % (len(self._entries), len(section_data))) return self.CompressData(section_data)
+ def GetData(self): + return self._BuildSectionData() + def GetOffsets(self): """Handle entries that want to set the offset/size of other entries
diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py index e265941a392..3c6eb7f6736 100644 --- a/tools/binman/ftest.py +++ b/tools/binman/ftest.py @@ -706,7 +706,8 @@ class TestFunctional(unittest.TestCase): """Test a simple binman run with debugging enabled""" self._DoTestFile('005_simple.dts', debug=True)
- def testDual(self): + # Disable for now until padding of images is supported + def xtestDual(self): """Test that we can handle creating two images
This also tests image padding.

On 19/10/2020 05:41, Simon Glass wrote:
Create a new _BuildSectionData() to hold the code that is now in GetData(), so that it is clearly separated from entry.GetData() base function.
Separate out the 'pad-before' processing to make this easier to understand.
Unfortunately this breaks the testDual test. Rather than squash several patches into an un-reviewable glob, disable the test for now.
Signed-off-by: Simon Glass sjg@chromium.org
tools/binman/etype/section.py | 35 +++++++++++++++++++++++++++++------ tools/binman/ftest.py | 3 ++- 2 files changed, 31 insertions(+), 7 deletions(-)
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index 9222042f5d8..d05adf00274 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -144,24 +144,47 @@ class Entry_section(Entry): def ObtainContents(self): return self.GetEntryContents()
- def GetData(self):
def _BuildSectionData(self):
"""Build the contents of a section
This places all entries at the right place, dealing with padding before
and after entries. It does not do padding for the section itself (the
pad-before and pad-after properties in the section items) since that is
handled by the parent section.
Returns:
Contents of the section (bytes)
""" section_data = b'' for entry in self._entries.values(): data = entry.GetData()
base = self.pad_before + (entry.offset or 0) - self._skip_at_start
pad = base - len(section_data) + (entry.pad_before or 0)
# Handle empty space before the entry
pad = (entry.offset or 0) - self._skip_at_start - len(section_data) if pad > 0: section_data += tools.GetBytes(self._pad_byte, pad)
# Handle padding before the entry
if entry.pad_before:
section_data += tools.GetBytes(self._pad_byte, entry.pad_before)
Consider this fragment:
section {
    skip-at-start = <16>;

    blob {
        pad-before = <16>;
        filename = "foo";
    }
}
Is this invalid as 'blob' isn't offset > skip-at-start? This wouldn't be padded in the earlier code, but would be padded after this (assuming it doesn't error out) -- was that a bug or undefined behaviour or something?
# Add in the actual entry data section_data += data
# Handle padding after the entry
if entry.pad_after:
section_data += tools.GetBytes(self._pad_byte, entry.pad_after)
if self.size:
pad = self.size - len(section_data)
if pad > 0:
section_data += tools.GetBytes(self._pad_byte, pad)
section_data += tools.GetBytes(self._pad_byte,
self.size - len(section_data)) self.Detail('GetData: %d entries, total size %#x' % (len(self._entries), len(section_data))) return self.CompressData(section_data)
def GetData(self):
return self._BuildSectionData()
def GetOffsets(self): """Handle entries that want to set the offset/size of other entries
diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py index e265941a392..3c6eb7f6736 100644 --- a/tools/binman/ftest.py +++ b/tools/binman/ftest.py @@ -706,7 +706,8 @@ class TestFunctional(unittest.TestCase): """Test a simple binman run with debugging enabled""" self._DoTestFile('005_simple.dts', debug=True)
- def testDual(self):
- # Disable for now until padding of images is supported
- def xtestDual(self):
I think you could use @unittest.skip() here, but perhaps it's not worth doing a v2 for that (especially since you're re-enabling it later in the series).
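i.e. roughly:

    @unittest.skip('Image padding is not supported yet')
    def testDual(self):
        """Test that we can handle creating two images"""
        ...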
"""Test that we can handle creating two images This also tests image padding.

Hi Alper,
On Mon, 19 Oct 2020 at 15:29, Alper Nebi Yasak alpernebiyasak@gmail.com wrote:
On 19/10/2020 05:41, Simon Glass wrote:
Create a new _BuildSectionData() to hold the code that is now in GetData(), so that it is clearly separated from entry.GetData() base function.
Separate out the 'pad-before' processing to make this easier to understand.
Unfortunately this breaks the testDual test. Rather than squash several patches into an un-reviewable glob, disable the test for now.
Signed-off-by: Simon Glass sjg@chromium.org
tools/binman/etype/section.py | 35 +++++++++++++++++++++++++++++------ tools/binman/ftest.py | 3 ++- 2 files changed, 31 insertions(+), 7 deletions(-)
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index 9222042f5d8..d05adf00274 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -144,24 +144,47 @@ class Entry_section(Entry): def ObtainContents(self): return self.GetEntryContents()
- def GetData(self):
def _BuildSectionData(self):
"""Build the contents of a section
This places all entries at the right place, dealing with padding before
and after entries. It does not do padding for the section itself (the
pad-before and pad-after properties in the section items) since that is
handled by the parent section.
Returns:
Contents of the section (bytes)
""" section_data = b'' for entry in self._entries.values(): data = entry.GetData()
base = self.pad_before + (entry.offset or 0) - self._skip_at_start
pad = base - len(section_data) + (entry.pad_before or 0)
# Handle empty space before the entry
pad = (entry.offset or 0) - self._skip_at_start - len(section_data) if pad > 0: section_data += tools.GetBytes(self._pad_byte, pad)
# Handle padding before the entry
if entry.pad_before:
section_data += tools.GetBytes(self._pad_byte, entry.pad_before)
Consider this fragment:
section {
    skip-at-start = <16>;

    blob {
        pad-before = <16>;
        filename = "foo";
    }
}
Is this invalid as 'blob' isn't offset > skip-at-start? This wouldn't be padded in the earlier code, but would be padded after this (assuming it doesn't error out) -- was that a bug or undefined behaviour or something?
You have very sharp eyes.
The case you list seems to produce the same result before and after this patch. But if I put the padding at the top level it does strange things, so perhaps that's what you mean?
I've added a few test cases along these lines in v2, and one of them certainly has different behaviour. This is good actually since it shows a simple case of what these padding changes are intended to fix.
# Add in the actual entry data section_data += data
# Handle padding after the entry
if entry.pad_after:
section_data += tools.GetBytes(self._pad_byte, entry.pad_after)
if self.size:
pad = self.size - len(section_data)
if pad > 0:
section_data += tools.GetBytes(self._pad_byte, pad)
section_data += tools.GetBytes(self._pad_byte,
self.size - len(section_data)) self.Detail('GetData: %d entries, total size %#x' % (len(self._entries), len(section_data))) return self.CompressData(section_data)
def GetData(self):
return self._BuildSectionData()
def GetOffsets(self): """Handle entries that want to set the offset/size of other entries
diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py index e265941a392..3c6eb7f6736 100644 --- a/tools/binman/ftest.py +++ b/tools/binman/ftest.py @@ -706,7 +706,8 @@ class TestFunctional(unittest.TestCase): """Test a simple binman run with debugging enabled""" self._DoTestFile('005_simple.dts', debug=True)
- def testDual(self):
- # Disable for now until padding of images is supported
- def xtestDual(self):
I think you could use @unittest.skip() here, but perhaps it's not worth doing a v2 for that (especially since you're re-enabling it later in the series).
Yes, will do.
"""Test that we can handle creating two images This also tests image padding.
Regards, Simon

On 26/10/2020 22:22, Simon Glass wrote:
On Mon, 19 Oct 2020 at 15:29, Alper Nebi Yasak alpernebiyasak@gmail.com wrote:
On 19/10/2020 05:41, Simon Glass wrote:
for entry in self._entries.values(): data = entry.GetData()
base = self.pad_before + (entry.offset or 0) - self._skip_at_start
pad = base - len(section_data) + (entry.pad_before or 0)
# Handle empty space before the entry
pad = (entry.offset or 0) - self._skip_at_start - len(section_data) if pad > 0: section_data += tools.GetBytes(self._pad_byte, pad)
# Handle padding before the entry
if entry.pad_before:
section_data += tools.GetBytes(self._pad_byte, entry.pad_before)
Consider this fragment:
section {
    skip-at-start = <16>;

    blob {
        pad-before = <16>;
        filename = "foo";
    }
}
Is this invalid as 'blob' isn't offset > skip-at-start? This wouldn't be padded in the earlier code, but would be padded after this (assuming it doesn't error out) -- was that a bug or undefined behaviour or something?
You have very sharp eyes.
Thanks! I had actually tried to copy this to the fit etype before making it use the section etype directly, so I did read and think about this part (and Pack()) a lot to better understand how things were supposed to work.
The case you list seems to produce the same result before and after this patch. But if I put the padding at the top level it does strange things, so perhaps that's what you mean?
I was trying to write a case where a pad = (-16) + (16) = 0 gets split into two (pad = -16, then entry.pad_before = 16) after that change, causing a change in padding. Still looks correct to me, but I didn't actually run anything so I'm probably getting something wrong.
I've added a few test cases along these lines in v2, and one of them certainly has different behaviour. This is good actually since it shows a simple case of what these padding changes are intended to fix.

Hi Alper,
On Mon, 26 Oct 2020 at 17:20, Alper Nebi Yasak alpernebiyasak@gmail.com wrote:
On 26/10/2020 22:22, Simon Glass wrote:
On Mon, 19 Oct 2020 at 15:29, Alper Nebi Yasak alpernebiyasak@gmail.com wrote:
On 19/10/2020 05:41, Simon Glass wrote:
for entry in self._entries.values(): data = entry.GetData()
base = self.pad_before + (entry.offset or 0) - self._skip_at_start
pad = base - len(section_data) + (entry.pad_before or 0)
# Handle empty space before the entry
pad = (entry.offset or 0) - self._skip_at_start - len(section_data) if pad > 0: section_data += tools.GetBytes(self._pad_byte, pad)
# Handle padding before the entry
if entry.pad_before:
section_data += tools.GetBytes(self._pad_byte, entry.pad_before)
Consider this fragment:
section {
    skip-at-start = <16>;

    blob {
        pad-before = <16>;
        filename = "foo";
    }
}
Is this invalid as 'blob' isn't offset > skip-at-start? This wouldn't be padded in the earlier code, but would be padded after this (assuming it doesn't error out) -- was that a bug or undefined behaviour or something?
You have very sharp eyes.
Thanks! I had actually tried to copy this to the fit etype before making it use the section etype directly, so I did read and think about this part (and Pack()) a lot to better understand how things were supposed to work.
The case you list seems to produce the same result before and after this patch. But if I put the padding at the top level it does strange things, so perhaps that's what you mean?
I was trying to write a case where a pad = (-16) + (16) = 0 gets split into two (pad = -16, then entry.pad_before = 16) after that change, causing a change in padding. Still looks correct to me, but I didn't actually run anything so I'm probably getting something wrong.
I've added a few test cases along these lines in v2, and one of them certainly has different behaviour. This is good actually since it shows a simple case of what these padding changes are intended to fix.
See what you think of the above test cases - testSectionPad() and testSectionAlign()
Regards, Simon

On 03/11/2020 18:11, Simon Glass wrote:
Hi Alper,
On Mon, 26 Oct 2020 at 17:20, Alper Nebi Yasak alpernebiyasak@gmail.com wrote:
On 26/10/2020 22:22, Simon Glass wrote:
I've added a few test cases along these lines in v2, and one of them certainly has different behaviour. This is good actually since it shows a simple case of what these padding changes are intended to fix.
See what you think of the above test cases - testSectionPad() and testSectionAlign()
I've tried to visualize those tests a bit, in the following:
- The vertical line of numbers is the offsets, usually starts with 0 and ends with entry.size of that entry.
- The offset to the upper-left of a block is that entry's entry.offset
- The "*" offsets are unconstrained, determined as parts are fitted together.
- The "k*n" are offsets for alignment that must be a multiple of k
- The vertical "[...]" line is what the entry.data returns for that entry.
- The horizontal line from a value is what the value corresponds to.
Hope things make sense. I kind of started drawing things to gather my thoughts and improve my understanding, but didn't want to discard them. Please do tell if anything's wrong.
== 177_skip_at_start.dts ==
binman { [ 0 . | section { . | [ 0 . | . | 16 -------------------- binman/section:skip-at-start . | . | | u-boot { . | . | | [ 0 . | . | | . | U_BOOT_DATA . | . | | ] * . | . | | } . | . | * . | ] * . | } ] * }
I understand skip-at-start as if it's creating a frame of reference for alignments. Then, that frame is mapped back to starting from zero.
It looks weird to me to use two nested offset-lines here. I can't use one line that starts at skip-at-start, because that changes the entry.offset if the section has pad-before.
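As a tiny numeric sketch of that reading (plain Python):

    skip_at_start = 16
    entry_offset = 16        # offsets live in the skip-at-start frame
    # position of the entry within the section's assembled contents:
    assert entry_offset - skip_at_start == 0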
== 178_skip_at_start_pad.dts ==
binman { [ 0 . | section { . | [ 0 . | . | 16 -------------------- binman/section:skip-at-start . | . | | u-boot { . | . | | 0 . | . | | | 8 x <0x00> . | . | | | --------------- binman/section/u-boot:pad-before . | . | | [ * . | . | | . | U_BOOT_DATA . | . | | ] * . | . | | | 4 x <0x00> . | . | | | --------------- binman/section/u-boot:pad-after . | . | | * ----------------- binman/section/u-boot:size . | . | | } . | . | * . | ] * . | } ] * }
This is like the above, just adds padding to u-boot. I have to visualize the padding as something inside the entry block, since alignments and entry.size is calculated for the padded data, not the raw U_BOOT_DATA. It's a bit weird but understandable that len(entry.data) != entry.size.
== 179_skip_at_start_section_pad.dts ==
binman { [ 0 . | section { . | 0 . | | 8 x <0x00> . | | --------------------- binman/section:pad-before . | [ * . | . | 16 -------------------- binman/section:skip-at-start . | . | | u-boot { . | . | | [ 0 . | . | | . | U_BOOT_DATA . | . | | ] * . | . | | } . | . | * . | ] * . | | 4 x <0x00> . | | --------------------- binman/section:pad-after . | * . | } ] * }
I'm still having trouble with this. In the old code:
base = self.pad_before + (entry.offset or 0) - self._skip_at_start
8 + 16 - 16
pad = base - len(section_data) + (entry.pad_before or 0)
8 - 0 + 0
if pad > 0:
8
section_data += tools.GetBytes(self._pad_byte, pad)
8
So, why was it prepending 16 bytes? The only way I see is if u-boot entry.offset is 24, but that's explicitly checked to be 16 in the test.
However, it's clear to me now that the fragment I sent wouldn't result in different padding between two versions, because there entry.offset = section.skip_at_start so the negative padding never happens.
Then, what does an entry.offset < section.skip-at-start mean? That's what was missing for the actual thing I was trying to trigger:
section {
    skip-at-start = <16>;

    blob {
        offset = <0>;
        pad-before = <16>;
        filename = "foo";
    };
};
== 180_section_pad.dts ==
binman { [ 0 . | section@0 { . | 0 . | | 3 x <0x26> -------------- binman:pad-byte . | | ----------------------- binman/section@0:pad-before . | [ * . | . | u-boot { . | . | 0 . | . | | 5 x <0x21> ---------- binman/section@0:pad-byte . | . | | ------------------- binman/section@0/u-boot:pad-before . | . | [ * . | . | . | U_BOOT_DATA . | . | ] * . | . | | 1 x <0x21> ---------- binman/section@0:pad-byte . | . | * ------------------- binman/section@0/u-boot:pad-after . | . | } . | ] * . | | 2 x <0x26> -------------- binman:pad-byte . | | ----------------------- binman/section@0:pad-after . | * . | } ] * }
It looks like paddings are part of the entry:
- entry.offset and entry.image_pos point to pad-before padding
- entry.size includes both paddings
- pad-before, pad-after properties belong to the entry
- entry's parent aligns the entry with respect to the padded-data
But, also the opposite:
- entry.data doesn't include padding bytes
- it's entirely added by the entry's parent
- pad-byte property belongs to the parent
I have no idea which way things should go towards. I think padding could be completely handled by the entries themselves. Section's GetPaddedDataForEntry(entry, entry_data) could be moved to Entry as GetPaddedData(pad_byte), which the parent section would use while assembling itself. The pad_byte argument could be dropped by making the entries find it by traversing upwards in the tree starting from the entry itself (and not just checking the immediate parent).
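A rough sketch of that idea (the method names here are part of the proposal, not existing binman API):

    class Entry(object):
        def GetPaddedData(self):
            """Return the contents with pad-before/pad-after applied"""
            pad_byte = self.GetPadByte()  # hypothetical: search up the tree
            data = self.GetData()
            return (bytes([pad_byte]) * (self.pad_before or 0) + data +
                    bytes([pad_byte]) * (self.pad_after or 0))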
== 181_section_align.dts ==
binman { [ 0 . | fill { . | [ 0 . | . | <0x00> . | ] 1 ------------------------- binman/fill:size . | } . * . | <0x26> ---------------------- binman:pad-byte . 2*n --------------------------- binman/section@1:align . | section@1 { . | [ 0 . | . | fill { . | . | [ 0 . | . | . | <0x00> . | . | ] 1 --------------------- binman/section@1/fill:size . | . | } . | . * . | . | <0x21> ------------------ binman/section@1:pad-byte . | . 4*n ----------------------- binman/section@1/u-boot:align . | . | u-boot { . | . | [ 0 . | . | . | U_BOOT_DATA . | . | ] * . | . | | <0x21> -------------- binman/section@1:pad-byte . | . | 8*n ------------------- binman/section@1/u-boot:size . | . | ----------------------binman/section@1/u-boot:align-size . | . | } . | ] * . | | <0x21> ------------------ binman/section@1:pad-byte . | 0x10*n -------------------- binman/section@1:size . | ------------------------ binman/section@1:align-size . | } ] * }
The pad-byte values here surprise me a bit. I'd say they should be the parent's pad-byte, since I think this in-section alignment padding is the same kind of thing as the pad-before and pad-after, and those use the parent's. However, like what I said above, the latter two could instead be changed to use the entry's pad-byte like this one.

Hi Alper,
On Wed, 4 Nov 2020 at 14:51, Alper Nebi Yasak alpernebiyasak@gmail.com wrote:
On 03/11/2020 18:11, Simon Glass wrote:
Hi Alper,
On Mon, 26 Oct 2020 at 17:20, Alper Nebi Yasak alpernebiyasak@gmail.com wrote:
On 26/10/2020 22:22, Simon Glass wrote:
I've added a few test cases along these lines in v2, and one of them certainly has different behaviour. This is good actually since it shows a simple case of what these padding changes are intended to fix.
See what you think of the above test cases - testSectionPad() and testSectionAlign()
I've tried to visualize those tests a bit, in the following:
- The vertical line of numbers is the offsets, usually starts with 0 and ends with entry.size of that entry.
- The offset to the upper-left of a block is that entry's entry.offset
- The "*" offsets are unconstrained, determined as parts are fitted together.
- The "k*n" are offsets for alignment that must be a multiple of k
- The vertical "[...]" line is what the entry.data returns for that entry.
- The horizontal line from a value is what the value corresponds to.
Hope things make sense. I kind of started drawing things to gather my thoughts and improve my understanding, but didn't want to discard them. Please do tell if anything's wrong.
== 177_skip_at_start.dts ==
binman { [ 0 . | section { . | [ 0 . | . | 16 -------------------- binman/section:skip-at-start . | . | | u-boot { . | . | | [ 0 . | . | | . | U_BOOT_DATA . | . | | ] * . | . | | } . | . | * . | ] * . | } ] * }
I understand skip-at-start as if it's creating a frame of reference for alignments. Then, that frame is mapped back to starting from zero.
It looks weird to me to use two nested offset-lines here. I can't use one line that starts at skip-at-start, because that changes the entry.offset if the section has pad-before.
== 178_skip_at_start_pad.dts ==
binman { [ 0 . | section { . | [ 0 . | . | 16 -------------------- binman/section:skip-at-start . | . | | u-boot { . | . | | 0 . | . | | | 8 x <0x00> . | . | | | \--------------- binman/section/u-boot:pad-before . | . | | [ * . | . | | . | U_BOOT_DATA . | . | | ] * . | . | | | 4 x <0x00> . | . | | | \--------------- binman/section/u-boot:pad-after . | . | | * ----------------- binman/section/u-boot:size . | . | | } . | . | * . | ] * . | } ] * }
This is like the above, just adds padding to u-boot. I have to visualize the padding as something inside the entry block, since alignments and entry.size is calculated for the padded data, not the raw U_BOOT_DATA. It's a bit weird but understandable that len(entry.data) != entry.size.
== 179_skip_at_start_section_pad.dts ==
binman { [ 0 . | section { . | 0 . | | 8 x <0x00> . | | \--------------------- binman/section:pad-before . | [ * . | . | 16 -------------------- binman/section:skip-at-start . | . | | u-boot { . | . | | [ 0 . | . | | . | U_BOOT_DATA . | . | | ] * . | . | | } . | . | * . | ] * . | | 4 x <0x00> . | | \--------------------- binman/section:pad-after . | * . | } ] * }
I'm still having trouble with this. In the old code:
base = self.pad_before + (entry.offset or 0) - self._skip_at_start
8 + 16 - 16
pad = base - len(section_data) + (entry.pad_before or 0)
8 - 0 + 0
if pad > 0:
8
section_data += tools.GetBytes(self._pad_byte, pad)
8
So, why was it prepending 16 bytes? The only way I see is if u-boot entry.offset is 24, but that's explicitly checked to be 16 in the test.
However, it's clear to me now that the fragment I sent wouldn't result in different padding between two versions, because there entry.offset = section.skip_at_start so the negative padding never happens.
Then, what does an entry.offset < section.skip-at-start mean? That's what was missing for the actual thing I was trying to trigger:
section {
    skip-at-start = <16>;

    blob {
        offset = <0>;
        pad-before = <16>;
        filename = "foo";
    };
};
== 180_section_pad.dts ==
binman { [ 0 . | section@0 { . | 0 . | | 3 x <0x26> -------------- binman:pad-byte . | | ----------------------- binman/section@0:pad-before . | [ * . | . | u-boot { . | . | 0 . | . | | 5 x <0x21> ---------- binman/section@0:pad-byte . | . | | ------------------- binman/section@0/u-boot:pad-before . | . | [ * . | . | . | U_BOOT_DATA . | . | ] * . | . | | 1 x <0x21> ---------- binman/section@0:pad-byte . | . | * ------------------- binman/section@0/u-boot:pad-after . | . | } . | ] * . | | 2 x <0x26> -------------- binman:pad-byte . | | ----------------------- binman/section@0:pad-after . | * . | } ] * }
It looks like paddings are part of the entry:
- entry.offset and entry.image_pos point to pad-before padding
- entry.size includes both paddings
- pad-before, pad-after properties belong to the entry
- entry's parent aligns the entry with respect to the padded-data
But, also the opposite:
- entry.data doesn't include padding bytes
- it's entirely added by the entry's parent
- pad-byte property belongs to the parent
I have no idea which way things should go towards. I think padding could be completely handled by the entries themselves. Section's GetPaddedDataForEntry(entry, entry_data) could be moved to Entry as GetPaddedData(pad_byte), which the parent section would use while assembling itself. The pad_byte argument could be dropped by making the entries find it by traversing upwards in the tree starting from the entry itself (and not just checking the immediate parent).
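Roughly, something like this sketch is what I mean (FakeEntry and the upward pad-byte lookup are made up for illustration, not existing binman code):

class FakeEntry:
    def __init__(self, data, pad_before=0, pad_after=0, pad_byte=None,
                 parent=None):
        self.data = data
        self.pad_before = pad_before
        self.pad_after = pad_after
        self.pad_byte = pad_byte
        self.parent = parent

    def find_pad_byte(self):
        # Walk up through the parent sections until one sets a pad byte
        node = self
        while node is not None:
            if node.pad_byte is not None:
                return node.pad_byte
            node = node.parent
        return 0

    def get_padded_data(self):
        pad = self.find_pad_byte()
        return (bytes([pad]) * self.pad_before + self.data +
                bytes([pad]) * self.pad_after)

# The parent section would then simply concatenate get_padded_data() results
top = FakeEntry(b'', pad_byte=0x26)
section = FakeEntry(b'', parent=top)       # no pad-byte of its own
u_boot = FakeEntry(b'1234', pad_before=2, pad_after=1, parent=section)
assert u_boot.get_padded_data() == b'&&1234&'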
I appreciate all your thinking on this!
I originally had it so len(entry.data) == entry.size (and also entry.contents_size). But the problem is that for compressed data we don't want to compress the padding. Say we have an entry of size 100 with only e0 bytes used (hex used throughout):
0   ---
    <data>
e0
    <empty>
100 ---
We can certainly add the extra 20 bytes onto the end of the entry data, as was done previously. Let's now compress it. We want to compress the actual contents of the entry, excluding any padding, since the padding is for the entry, not its contents:
0   ---
    <compressed data>
80
    <empty>
100 ---
Binman has always had the concept of Entry.contents_size separate from Entry.size. But they have typically been the same. With compression, that breaks down, since the Entry.data is the compressed data (Entry.uncomp_data is the uncompressed) and Entry.contents_size is the size of the compressed data. In fact the uncompressed data might not even fit in the entry.
In this case it does not make sense to have the Entry pad its own data, since then it would compress its contents together with the padding around them. In trying to think this through and actually implement it, I came to the point where it is today.
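A minimal sketch of that ordering (using zlib as a stand-in for binman's lz4 support, purely to show that only the contents are compressed and the padding is added afterwards):

import zlib   # stand-in for binman's lz4 support; the ordering is the point

def build_entry(contents, size, pad_byte=0x00):
    # Compress the real contents only; the padding fills the entry's slot
    # in its parent and is never part of what gets compressed.
    data = zlib.compress(contents)
    assert len(data) <= size, 'contents do not fit in the entry'
    return data + bytes([pad_byte]) * (size - len(data))

contents = bytes(0xe0)              # the 0xe0 bytes of real content
entry = build_entry(contents, size=0x100)
assert len(entry) == 0x100          # the entry still occupies its full size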
== 181_section_align.dts ==
binman {
[ 0
. | fill {
. | [ 0
. | . | <0x00>
. | ] 1 ------------------------- binman/fill:size
. | }
. *
. | <0x26> ---------------------- binman:pad-byte
. 2*n --------------------------- binman/section@1:align
. | section@1 {
. | [ 0
. | . | fill {
. | . | [ 0
. | . | . | <0x00>
. | . | ] 1 --------------------- binman/section@1/fill:size
. | . | }
. | . *
. | . | <0x21> ------------------ binman/section@1:pad-byte
. | . 4*n ----------------------- binman/section@1/u-boot:align
. | . | u-boot {
. | . | [ 0
. | . | . | U_BOOT_DATA
. | . | ] *
. | . | | <0x21> -------------- binman/section@1:pad-byte
. | . | 8*n ------------------- binman/section@1/u-boot:size
. | . | ---------------------- binman/section@1/u-boot:align-size
. | . | }
. | ] *
. | | <0x21> ------------------ binman/section@1:pad-byte
. | 0x10*n -------------------- binman/section@1:size
. | ------------------------ binman/section@1:align-size
. | }
] *
}
The pad-byte values here surprise me a bit. I'd say they should be the parent's pad-byte, since I think this in-section alignment padding is the same kind of thing as pad-before and pad-after, and those use the parent's. However, as I said above, the latter two could instead be changed to use the entry's pad-byte, like this one does.
With the design decision above it makes sense for the parent to provide the pad data, since it is doing the padding. Were it to come from the Entry itself, then every entry would need to specify the padding byte to use. But if you think about it at a high level, it is the parent section that is providing the space for the Entries to be packed into, so the parent section should provide the padding byte. Again, I have tried it both ways...
This stuff is surprisingly complicated. It will be interesting to see if it stands the test of time. One reason for all the tests is so the behaviour is as fully defined as possible.
Regards, Simon

At present this function does the padding needed around an entry. It is easier to understand what is going on if we have a function that returns the contents of an entry, with padding included.
Refactor the code accordingly, adding a new GetPaddedData() method.
Signed-off-by: Simon Glass sjg@chromium.org ---
tools/binman/etype/section.py | 59 +++++++++++++++++++++++++++++------ tools/binman/image.py | 2 +- 2 files changed, 51 insertions(+), 10 deletions(-)
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index d05adf00274..f80432914f2 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -16,6 +16,7 @@ from binman.entry import Entry from dtoc import fdt_util from patman import tools from patman import tout +from patman.tools import ToHexSize
class Entry_section(Entry): @@ -144,6 +145,36 @@ class Entry_section(Entry): def ObtainContents(self): return self.GetEntryContents()
+ def GetPaddedDataForEntry(self, entry): + """Get the data for an entry including any padding + + Gets the entry data and uses the section pad-byte value to add padding + before and after as defined by the pad-before and pad-after properties. + This does not consider alignment. + + Args: + entry: Entry to check + + Returns: + Contents of the entry along with any pad bytes before and + after it (bytes) + """ + data = b'' + # Handle padding before the entry + if entry.pad_before: + data += tools.GetBytes(self._pad_byte, entry.pad_before) + + # Add in the actual entry data + data += entry.GetData() + + # Handle padding after the entry + if entry.pad_after: + data += tools.GetBytes(self._pad_byte, entry.pad_after) + + self.Detail('GetPaddedDataForEntry: size %s' % ToHexSize(self.data)) + + return data + def _BuildSectionData(self): """Build the contents of a section
@@ -158,23 +189,15 @@ class Entry_section(Entry): section_data = b''
for entry in self._entries.values(): - data = entry.GetData() + data = self.GetPaddedDataForEntry(entry) # Handle empty space before the entry pad = (entry.offset or 0) - self._skip_at_start - len(section_data) if pad > 0: section_data += tools.GetBytes(self._pad_byte, pad)
- # Handle padding before the entry - if entry.pad_before: - section_data += tools.GetBytes(self._pad_byte, entry.pad_before) - # Add in the actual entry data section_data += data
- # Handle padding after the entry - if entry.pad_after: - section_data += tools.GetBytes(self._pad_byte, entry.pad_after) - if self.size: section_data += tools.GetBytes(self._pad_byte, self.size - len(section_data)) @@ -182,6 +205,24 @@ class Entry_section(Entry): (len(self._entries), len(section_data))) return self.CompressData(section_data)
+ def GetPaddedData(self): + """Get the data for a section including any padding + + Gets the section data and uses the parent section's pad-byte value to + add padding before and after as defined by the pad-before and pad-after + properties. If this is a top-level section (i.e. an image), this is the + same as GetData(), since padding is not supported. + + This does not consider alignment. + + Returns: + Contents of the section along with any pad bytes before and + after it (bytes) + """ + if self.section: + return super().GetPaddedData() + return self.GetData() + def GetData(self): return self._BuildSectionData()
diff --git a/tools/binman/image.py b/tools/binman/image.py index a8772c3763b..d65ab887b80 100644 --- a/tools/binman/image.py +++ b/tools/binman/image.py @@ -146,7 +146,7 @@ class Image(section.Entry_section): fname = tools.GetOutputFilename(self._filename) tout.Info("Writing image to '%s'" % fname) with open(fname, 'wb') as fd: - data = self.GetData() + data = self.GetPaddedData() fd.write(data) tout.Info("Wrote %#x bytes" % len(data))

Each section is padded up to its size, if the contents are not large enough. Move this logic from _BuildSectionData() to GetPaddedDataForEntry() so that all the padding is in one place.
With this, the testDual test is working again, so enable it.
Signed-off-by: Simon Glass sjg@chromium.org ---
tools/binman/etype/section.py | 18 ++++++++++-------- tools/binman/ftest.py | 3 +-- 2 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index f80432914f2..7cbb50057a3 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -159,17 +159,23 @@ class Entry_section(Entry): Contents of the entry along with any pad bytes before and after it (bytes) """ + pad_byte = (entry._pad_byte if isinstance(entry, Entry_section) + else self._pad_byte) + data = b'' # Handle padding before the entry if entry.pad_before: - data += tools.GetBytes(self._pad_byte, entry.pad_before) + data += tools.GetBytes(pad_byte, entry.pad_before)
# Add in the actual entry data data += entry.GetData()
# Handle padding after the entry if entry.pad_after: - data += tools.GetBytes(self._pad_byte, entry.pad_after) + data += tools.GetBytes(pad_byte, entry.pad_after) + + if entry.size: + data += tools.GetBytes(pad_byte, entry.size - len(data))
self.Detail('GetPaddedDataForEntry: size %s' % ToHexSize(self.data))
@@ -198,9 +204,6 @@ class Entry_section(Entry): # Add in the actual entry data section_data += data
- if self.size: - section_data += tools.GetBytes(self._pad_byte, - self.size - len(section_data)) self.Detail('GetData: %d entries, total size %#x' % (len(self._entries), len(section_data))) return self.CompressData(section_data) @@ -219,9 +222,8 @@ class Entry_section(Entry): Contents of the section along with any pad bytes before and after it (bytes) """ - if self.section: - return super().GetPaddedData() - return self.GetData() + section = self.section or self + return section.GetPaddedDataForEntry(self)
def GetData(self): return self._BuildSectionData() diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py index 3c6eb7f6736..e265941a392 100644 --- a/tools/binman/ftest.py +++ b/tools/binman/ftest.py @@ -706,8 +706,7 @@ class TestFunctional(unittest.TestCase): """Test a simple binman run with debugging enabled""" self._DoTestFile('005_simple.dts', debug=True)
- # Disable for now until padding of images is supported - def xtestDual(self): + def testDual(self): """Test that we can handle creating two images
This also tests image padding.

At present padding of sections is inconsistent with other entry types, in that different pad bytes are used.
When a normal entry is padded by its parent, the parent's pad byte is used. But for sections, the section's pad byte is used.
Adjust logic to always do this the same way.
Note there is still a special case in entry_Section.GetPaddedData() where an image is padded with the pad byte of the top-level section. This is necessary since otherwise there would be no way to set the pad byte of the image, without adding a top-level section to every image.
Signed-off-by: Simon Glass sjg@chromium.org ---
tools/binman/etype/section.py | 4 +-- tools/binman/ftest.py | 22 ++++++++++++++++ tools/binman/test/177_section_pad.dts | 27 ++++++++++++++++++++ tools/binman/test/178_section_align.dts | 34 +++++++++++++++++++++++++ 4 files changed, 85 insertions(+), 2 deletions(-) create mode 100644 tools/binman/test/177_section_pad.dts create mode 100644 tools/binman/test/178_section_align.dts
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index 7cbb50057a3..c423a22c80f 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -165,14 +165,14 @@ class Entry_section(Entry): data = b'' # Handle padding before the entry if entry.pad_before: - data += tools.GetBytes(pad_byte, entry.pad_before) + data += tools.GetBytes(self._pad_byte, entry.pad_before)
# Add in the actual entry data data += entry.GetData()
# Handle padding after the entry if entry.pad_after: - data += tools.GetBytes(pad_byte, entry.pad_after) + data += tools.GetBytes(self._pad_byte, entry.pad_after)
if entry.size: data += tools.GetBytes(pad_byte, entry.size - len(data)) diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py index e265941a392..a43f79cc248 100644 --- a/tools/binman/ftest.py +++ b/tools/binman/ftest.py @@ -3821,6 +3821,28 @@ class TestFunctional(unittest.TestCase): self.assertIn("too small to hold data (need %#x more bytes)" % short, str(e.exception))
+ def testSectionPad(self): + """Testing padding with sections""" + data = self._DoReadFile('177_section_pad.dts') + expected = (tools.GetBytes(ord('&'), 3) + + tools.GetBytes(ord('!'), 5) + + U_BOOT_DATA + + tools.GetBytes(ord('!'), 1) + + tools.GetBytes(ord('&'), 2)) + self.assertEqual(expected, data) + + def testSectionAlign(self): + """Testing alignment with sections""" + data = self._DoReadFileDtb('178_section_align.dts', map=True)[0] + expected = (b'\0' + # fill section + tools.GetBytes(ord('&'), 1) + # padding to section align + b'\0' + # fill section + tools.GetBytes(ord('!'), 3) + # padding to u-boot align + U_BOOT_DATA + + tools.GetBytes(ord('!'), 4) + # padding to u-boot size + tools.GetBytes(ord('!'), 4)) # padding to section size + self.assertEqual(expected, data) +
if __name__ == "__main__": unittest.main() diff --git a/tools/binman/test/177_section_pad.dts b/tools/binman/test/177_section_pad.dts new file mode 100644 index 00000000000..7e4ebf257b8 --- /dev/null +++ b/tools/binman/test/177_section_pad.dts @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + pad-byte = <0x26>; + section@0 { + read-only; + + /* Padding for the section uses the 0x26 pad byte */ + pad-before = <3>; + pad-after = <2>; + + /* Set the padding byte for entries, i.e. u-boot */ + pad-byte = <0x21>; + + u-boot { + pad-before = <5>; + pad-after = <1>; + }; + }; + }; +}; diff --git a/tools/binman/test/178_section_align.dts b/tools/binman/test/178_section_align.dts new file mode 100644 index 00000000000..90795d131b0 --- /dev/null +++ b/tools/binman/test/178_section_align.dts @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + pad-byte = <0x26>; + fill { + size = <1>; + }; + section@1 { + read-only; + + /* Padding for the section uses the 0x26 pad byte */ + align = <2>; + align-size = <0x10>; + + /* Set the padding byte for entries, i.e. u-boot */ + pad-byte = <0x21>; + + fill { + size = <1>; + }; + + u-boot { + align = <4>; + align-size = <8>; + }; + }; + }; +};

When compressing an entry, the original uncompressed data is overwritten. Store it so it is available if needed.
Signed-off-by: Simon Glass sjg@chromium.org ---
tools/binman/entry.py | 7 ++++++- tools/binman/ftest.py | 12 ++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/tools/binman/entry.py b/tools/binman/entry.py index 0421129c031..d701eaff8fd 100644 --- a/tools/binman/entry.py +++ b/tools/binman/entry.py @@ -60,7 +60,10 @@ class Entry(object): the containing section, 0 if none. The pad bytes become part of the entry. data: Contents of entry (string of bytes). This does not include - padding created by pad_before or pad_after + padding created by pad_before or pad_after. If the entry is + compressed, this contains the compressed data. + uncomp_data: Original uncompressed data, if this entry is compressed, + else None compress: Compression algoithm used (e.g. 'lz4'), 'none' if none orig_offset: Original offset value read from node orig_size: Original size value read from node @@ -83,6 +86,7 @@ class Entry(object): self.pre_reset_size = None self.uncomp_size = None self.data = None + self.uncomp_data = None self.contents_size = 0 self.align = None self.align_size = None @@ -856,6 +860,7 @@ features to produce new behaviours. Returns: Compressed data (first word is the compressed size) """ + self.uncomp_data = indata if self.compress != 'none': self.uncomp_size = len(indata) data = tools.Compress(indata, self.compress) diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py index a43f79cc248..7360e2ebaba 100644 --- a/tools/binman/ftest.py +++ b/tools/binman/ftest.py @@ -1808,6 +1808,18 @@ class TestFunctional(unittest.TestCase): props = self._GetPropTree(dtb, ['size', 'uncomp-size']) orig = self._decompress(data) self.assertEquals(COMPRESS_DATA, orig) + + # Do a sanity check on various fields + image = control.images['image'] + entries = image.GetEntries() + self.assertEqual(1, len(entries)) + + entry = entries['blob'] + self.assertEqual(COMPRESS_DATA, entry.uncomp_data) + self.assertEqual(len(COMPRESS_DATA), entry.uncomp_size) + orig = self._decompress(entry.data) + self.assertEqual(orig, entry.uncomp_data) + expected = { 'blob:uncomp-size': len(COMPRESS_DATA), 'blob:size': len(data),

Section contents is not set up when ObtainContents() is called, since packing often changes the layout of the contents. Ensure that the contents are correctly recorded by making this function regenerate the section. It is normally only called by the parent section (when packing) or by the top-level image code, when writing out the image. So the performance impact is fairly small.
Signed-off-by: Simon Glass sjg@chromium.org ---
tools/binman/entry.py | 6 ++++++ tools/binman/etype/section.py | 14 +++++++++++++- tools/binman/ftest.py | 2 ++ 3 files changed, 21 insertions(+), 1 deletion(-)
diff --git a/tools/binman/entry.py b/tools/binman/entry.py index d701eaff8fd..01a5fde84ed 100644 --- a/tools/binman/entry.py +++ b/tools/binman/entry.py @@ -437,6 +437,12 @@ class Entry(object): return self._node.path
def GetData(self): + """Get the contents of an entry + + Returns: + bytes content of the entry, excluding any padding. If the entry is + compressed, the compressed data is returned + """ self.Detail('GetData: size %s' % ToHexSize(self.data)) return self.data
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index c423a22c80f..6e6f6749727 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -226,7 +226,19 @@ class Entry_section(Entry): return section.GetPaddedDataForEntry(self)
def GetData(self): - return self._BuildSectionData() + """Get the contents of an entry + + This builds the contents of the section, stores this as the contents of + the section and returns it + + Returns: + bytes content of the section, made up for all all of its subentries. + This excludes any padding. If the section is compressed, the + compressed data is returned + """ + data = self._BuildSectionData() + self.SetContents(data) + return data
def GetOffsets(self): """Handle entries that want to set the offset/size of other entries diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py index 7360e2ebaba..bcd4482717a 100644 --- a/tools/binman/ftest.py +++ b/tools/binman/ftest.py @@ -1820,6 +1820,8 @@ class TestFunctional(unittest.TestCase): orig = self._decompress(entry.data) self.assertEqual(orig, entry.uncomp_data)
+ self.assertEqual(image.data, entry.data) + expected = { 'blob:uncomp-size': len(COMPRESS_DATA), 'blob:size': len(data),

When a section is compressed, all entries within it are grouped together into a compressed block of data. This obscures the start of each individual child entry.
Avoid reporting bogus 'image-pos' properties in this case, since it is not possible to access the entry at the location provided. The entire section must be decompressed first.
CBFS does not support compressing whole sections, only individual files, so needs no special handling here.
Signed-off-by: Simon Glass sjg@chromium.org ---
tools/binman/control.py | 2 +- tools/binman/entry.py | 18 ++++++++++++++---- tools/binman/etype/cbfs.py | 6 +++--- tools/binman/etype/section.py | 13 ++++++++----- 4 files changed, 26 insertions(+), 13 deletions(-)
diff --git a/tools/binman/control.py b/tools/binman/control.py index ee5771e7292..26f1cf462ec 100644 --- a/tools/binman/control.py +++ b/tools/binman/control.py @@ -462,7 +462,7 @@ def PrepareImagesAndDtbs(dtb_fname, select_images, update_fdt): for image in images.values(): image.ExpandEntries() if update_fdt: - image.AddMissingProperties() + image.AddMissingProperties(True) image.ProcessFdt(dtb)
for dtb_item in state.GetAllFdts(): diff --git a/tools/binman/entry.py b/tools/binman/entry.py index 01a5fde84ed..8fa1dcef2da 100644 --- a/tools/binman/entry.py +++ b/tools/binman/entry.py @@ -213,11 +213,20 @@ class Entry(object): def ExpandEntries(self): pass
- def AddMissingProperties(self): - """Add new properties to the device tree as needed for this entry""" - for prop in ['offset', 'size', 'image-pos']: + def AddMissingProperties(self, have_image_pos): + """Add new properties to the device tree as needed for this entry + + Args: + have_image_pos: True if this entry has an image position. This can + be False if its parent section is compressed, since compression + groups all entries together into a compressed block of data, + obscuring the start of each individual child entry + """ + for prop in ['offset', 'size']: if not prop in self._node.props: state.AddZeroProp(self._node, prop) + if have_image_pos and 'image-pos' not in self._node.props: + state.AddZeroProp(self._node, 'image-pos') if self.GetImage().allow_repack: if self.orig_offset is not None: state.AddZeroProp(self._node, 'orig-offset', True) @@ -235,7 +244,8 @@ class Entry(object): state.SetInt(self._node, 'offset', self.offset) state.SetInt(self._node, 'size', self.size) base = self.section.GetRootSkipAtStart() if self.section else 0 - state.SetInt(self._node, 'image-pos', self.image_pos - base) + if self.image_pos is not None: + state.SetInt(self._node, 'image-pos', self.image_pos) if self.GetImage().allow_repack: if self.orig_offset is not None: state.SetInt(self._node, 'orig-offset', self.orig_offset, True) diff --git a/tools/binman/etype/cbfs.py b/tools/binman/etype/cbfs.py index 650ab2c292f..6cdbaa085f5 100644 --- a/tools/binman/etype/cbfs.py +++ b/tools/binman/etype/cbfs.py @@ -237,10 +237,10 @@ class Entry_cbfs(Entry): if entry._cbfs_compress: entry.uncomp_size = cfile.memlen
- def AddMissingProperties(self): - super().AddMissingProperties() + def AddMissingProperties(self, have_image_pos): + super().AddMissingProperties(have_image_pos) for entry in self._cbfs_entries.values(): - entry.AddMissingProperties() + entry.AddMissingProperties(have_image_pos) if entry._cbfs_compress: state.AddZeroProp(entry._node, 'uncomp-size') # Store the 'compress' property, since we don't look at diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index 6e6f6749727..2812989ba1a 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -136,11 +136,13 @@ class Entry_section(Entry): for entry in self._entries.values(): entry.ExpandEntries()
- def AddMissingProperties(self): + def AddMissingProperties(self, have_image_pos): """Add new properties to the device tree as needed for this entry""" - super().AddMissingProperties() + super().AddMissingProperties(have_image_pos) + if self.compress != 'none': + have_image_pos = False for entry in self._entries.values(): - entry.AddMissingProperties() + entry.AddMissingProperties(have_image_pos)
def ObtainContents(self): return self.GetEntryContents() @@ -323,8 +325,9 @@ class Entry_section(Entry):
def SetImagePos(self, image_pos): super().SetImagePos(image_pos) - for entry in self._entries.values(): - entry.SetImagePos(image_pos + self.offset) + if self.compress == 'none': + for entry in self._entries.values(): + entry.SetImagePos(image_pos + self.offset)
def ProcessContents(self): sizes_ok_base = super(Entry_section, self).ProcessContents()

On 19/10/2020 05:41, Simon Glass wrote:
When a section is compressed, all entries within it are grouped together into a compressed block of data. This obscures the start of each individual child entry.
Avoid reporting bogus 'image-pos' properties in this case, since it is not possible to access the entry at the location provided. The entire section must be decompressed first.
CBFS does not support compressing whole sections, only individual files, so needs no special handling here.
Signed-off-by: Simon Glass sjg@chromium.org
Maybe instead of this, 'image-pos' could be overloaded to a list of recursive relative positions within compressed archives? Something like:
section {
        offset = <1000>;
        compress = "lz4";
        /* image-pos = <1000>; */

        blob {
                filename = "foo";
                offset = <100>;
                /* image-pos = <1000>, <100>; */
        };
};

cbfs {
        offset = <2000>;
        /* image-pos = <2000>; */

        blob {
                filename = "bar";
                cbfs-compress = "lz4";
                cbfs-offset = <200>;
                /* image-pos = <2200>, <0>; */
        };

        blob2 {
                filename = "baz";
                cbfs-offset = <800>;
                /* image-pos = <2800>; */
        };
};

Hi Alper,
On Mon, 19 Oct 2020 at 15:29, Alper Nebi Yasak alpernebiyasak@gmail.com wrote:
On 19/10/2020 05:41, Simon Glass wrote:
When a section is compressed, all entries within it are grouped together into a compressed block of data. This obscures the start of each individual child entry.
Avoid reporting bogus 'image-pos' properties in this case, since it is not possible to access the entry at the location provided. The entire section must be decompressed first.
CBFS does not support compressing whole sections, only individual files, so needs no special handling here.
Signed-off-by: Simon Glass sjg@chromium.org
Maybe instead of this, 'image-pos' could be overloaded to a list of recursive relative positions within compressed archives? Something like:
section { offset = <1000>; compress = "lz4"; /* image-pos = <1000>; */ blob { filename = "foo"; offset = <100>; /* image-pos = <1000>, <100>; */ }; }; cbfs { offset = <2000>; /* image-pos = <2000>; */ blob { filename = "bar"; cbfs-compress = "lz4"; cbfs-offset = <200>; /* image-pos = <2200>, <0>; */ }; blob2 { filename = "baz"; cbfs-offset = <800>; /* image-pos = <2800>; */ }; };
What do these values actually mean though? What would we use them for?
Note that CBFS compresses its data on a per-entry basis so the image-pos is actually supported.
[..]
Regards, Simon

On 26/10/2020 22:22, Simon Glass wrote:
Hi Alper,
On Mon, 19 Oct 2020 at 15:29, Alper Nebi Yasak alpernebiyasak@gmail.com wrote:
On 19/10/2020 05:41, Simon Glass wrote:
When a section is compressed, all entries within it are grouped together into a compressed block of data. This obscures the start of each individual child entry.
Avoid reporting bogus 'image-pos' properties in this case, since it is not possible to access the entry at the location provided. The entire section must be decompressed first.
CBFS does not support compressing whole sections, only individual files, so needs no special handling here.
Signed-off-by: Simon Glass sjg@chromium.org
Maybe instead of this, 'image-pos' could be overloaded to a list of recursive relative positions within compressed archives? Something like:
section { offset = <1000>; compress = "lz4"; /* image-pos = <1000>; */ blob { filename = "foo"; offset = <100>; /* image-pos = <1000>, <100>; */ }; }; cbfs { offset = <2000>; /* image-pos = <2000>; */ blob { filename = "bar"; cbfs-compress = "lz4"; cbfs-offset = <200>; /* image-pos = <2200>, <0>; */ }; blob2 { filename = "baz"; cbfs-offset = <800>; /* image-pos = <2800>; */ }; };
What do these values actually mean though? What would we use them for?
What I meant is using pairs of <position-of-compressed-archive>, <position-in-uncompressed-data> to avoid losing position information of compressed entries, but honestly I'm not sure if any of this will be necessary. I think the single use would be to avoid parsing uncompressed data containing multiple entries to identify what is where.
To get to an entry in a compressed section we could get "size" bytes of data starting from "image-pos[0]", decompress that to somewhere, then get "uncomp-size" bytes of data starting from decompression address + "image-pos[1]". I think it can even be extended to multiple layers of compression (<pos-of-x>, <pos-of-y-in-d(x)>, <pos-of-z-in-d(y)>, ...).
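Roughly, the lookup could be sketched like this (purely hypothetical, since multi-cell image-pos is only a proposal here; zlib stands in for whatever decompressor the section uses):

import zlib   # stand-in for the lz4 decompressor

def read_compressed_entry(image, image_pos, section_size, entry_size,
                          decompress=zlib.decompress):
    # image_pos[0]: where the compressed section sits in the image
    #               ('size' bytes of it)
    # image_pos[1]: where the entry sits in the decompressed data
    #               ('uncomp-size' bytes of it)
    block = image[image_pos[0]:image_pos[0] + section_size]
    plain = decompress(block)
    return plain[image_pos[1]:image_pos[1] + entry_size]

section = zlib.compress(b'foo-contents' + b'BAR!')
image = b'\0' * 0x10 + section
assert read_compressed_entry(image, [0x10, 12], len(section), 4) == b'BAR!'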
Note that CBFS compresses its data on a per-entry basis so the image-pos is actually supported.
On the compressed cbfs example, I assume the data we currently get at image-pos = <2200> is compressed data and still needs to be decompressed for it to be useable (technically bogus?). If so, I'd go a level deeper by adding a <0> indicating the start of the uncompressed data is the actual position of the entry contents.

Hi Alper,
On Mon, 26 Oct 2020 at 17:20, Alper Nebi Yasak alpernebiyasak@gmail.com wrote:
On 26/10/2020 22:22, Simon Glass wrote:
Hi Alper,
On Mon, 19 Oct 2020 at 15:29, Alper Nebi Yasak alpernebiyasak@gmail.com wrote:
On 19/10/2020 05:41, Simon Glass wrote:
When a section is compressed, all entries within it are grouped together into a compressed block of data. This obscures the start of each individual child entry.
Avoid reporting bogus 'image-pos' properties in this case, since it is not possible to access the entry at the location provided. The entire section must be decompressed first.
CBFS does not support compressing whole sections, only individual files, so needs no special handling here.
Signed-off-by: Simon Glass sjg@chromium.org
Maybe instead of this, 'image-pos' could be overloaded to a list of recursive relative positions within compressed archives? Something like:
section { offset = <1000>; compress = "lz4"; /* image-pos = <1000>; */ blob { filename = "foo"; offset = <100>; /* image-pos = <1000>, <100>; */ }; }; cbfs { offset = <2000>; /* image-pos = <2000>; */ blob { filename = "bar"; cbfs-compress = "lz4"; cbfs-offset = <200>; /* image-pos = <2200>, <0>; */ }; blob2 { filename = "baz"; cbfs-offset = <800>; /* image-pos = <2800>; */ }; };
What do these values actually mean though? What would we use them for?
What I meant is using pairs of <position-of-compressed-archive>, <position-in-uncompressed-data> to avoid losing position information of compressed entries, but honestly I'm not sure if any of this will be necessary. I think the single use would be to avoid parsing uncompressed data containing multiple entries to identify what is where.
To get to an entry in a compressed section we could get "size" bytes of data starting from "image-pos[0]", decompress that to somewhere, then get "uncomp-size" bytes of data starting from decompression address + "image-pos[1]". I think it can even be extended to multiple layers of compression (<pos-of-x>, <pos-of-y-in-d(x)>, <pos-of-z-in-d(y)>, ...).
Note that CBFS compresses its data on a per-entry basis so the image-pos is actually supported.
On the compressed cbfs example, I assume the data we currently get at image-pos = <2200> is compressed data and still needs to be decompressed for it to be useable (technically bogus?). If so, I'd go a level deeper by adding a <0> indicating the start of the uncompressed data is the actual position of the entry contents.
For the CBFS case there isn't anything needed I think.
We already have an offset field which tells you where something is in the compressed data. For the case where a compressed section has just other entries in it, the offset is enough.
For the case where the compressed section has other uncompressed sections, we need to add together the offsets (uncompressed section + its entry) to get to the compressed location. It certainly has some benefits.
But I wonder if instead we should have a property like comp-pos that tells you the absolute offset within the compressed section? I'm not a big fan of making image-pos a multi-cell value.
Regards, Simon

On 30/10/2020 21:15, Simon Glass wrote:
Hi Alper,
On Mon, 26 Oct 2020 at 17:20, Alper Nebi Yasak alpernebiyasak@gmail.com wrote:
What I meant is using pairs of <position-of-compressed-archive>, <position-in-uncompressed-data> to avoid losing position information of compressed entries, but honestly I'm not sure if any of this will be necessary. I think the single use would be to avoid parsing uncompressed data containing multiple entries to identify what is where.
For the CBFS case there isn't anything needed I think.
We already have an offset field which tells you where something is in the compressed data. For the case where a compressed section has just other entries in it, the offset is enough.
That completely slipped my mind. I haven't got a good grasp on all of this yet and it shows :)
So it's possible to just traverse down the tree, keep adding offsets, decompress any encountered compressed entry, then keep adding offsets onto where it's decompressed, and so on.
For the case where the compressed section has other uncompressed sections, we need to add together the offsets (uncompressed section + its entry) to get to the compressed location. It certainly has some benefits.
But I wonder if instead we should have a properly like comp-pos that tells you the absolute offset within the compressed section? I'm not a big fan of making image-pos a multi-cell value.
Then, image-pos is by definition the sum of offsets from the topmost parent (image) down to the entry? If so, that comp-pos would be the same thing but from the closest compressed parent (or simply only support one layer of compression).
I can't think of anything specific that would need this anyway, but I guess one layer of compression should be enough for most if not all cases.
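A rough sketch of those two definitions (comp-pos is only a proposed property; the Node tree here is a stand-in for binman's entries, not real binman code):

class Node:
    def __init__(self, offset=0, compressed=False, parent=None):
        self.offset = offset
        self.compressed = compressed
        self.parent = parent

def image_pos(node):
    # Sum of offsets from the image down to the entry
    pos = 0
    while node is not None:
        pos += node.offset
        node = node.parent
    return pos

def comp_pos(node):
    # Same sum, but stopping at the closest compressed ancestor
    pos = 0
    while node is not None and not node.compressed:
        pos += node.offset
        node = node.parent
    return pos

image = Node()
section = Node(offset=0x1000, compressed=True, parent=image)
blob = Node(offset=0x100, parent=section)
assert image_pos(blob) == 0x1100   # only meaningful before compression
assert comp_pos(blob) == 0x100     # where the blob sits in the decompressed data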

This function just calls CheckEntries() in the only non-trivial implementation. Drop it and use CheckEntries() directly.
Signed-off-by: Simon Glass sjg@chromium.org ---
tools/binman/entry.py | 2 +- tools/binman/etype/section.py | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-)
diff --git a/tools/binman/entry.py b/tools/binman/entry.py index 8fa1dcef2da..8946d2bc02f 100644 --- a/tools/binman/entry.py +++ b/tools/binman/entry.py @@ -520,7 +520,7 @@ class Entry(object): """ pass
- def CheckOffset(self): + def CheckEntries(self): """Check that the entry offsets are correct
This is used for entries which have extra offset requirements (other diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index 2812989ba1a..fb4bf640bfb 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -296,7 +296,7 @@ class Entry_section(Entry): offset = 0 prev_name = 'None' for entry in self._entries.values(): - entry.CheckOffset() + entry.CheckEntries() if (entry.offset < self._skip_at_start or entry.offset + entry.size > self._skip_at_start + self.size): @@ -337,9 +337,6 @@ class Entry_section(Entry): sizes_ok = False return sizes_ok and sizes_ok_base
- def CheckOffset(self): - self.CheckEntries() - def WriteMap(self, fd, indent): """Write a map of the section to a .map file

At present sorting and expanding entries are side-effects of the CheckEntries() function. This is a bit confusing, as 'checking' would not normally involve making changes.
Move these steps into the Pack() function instead.
Signed-off-by: Simon Glass sjg@chromium.org ---
tools/binman/etype/section.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index fb4bf640bfb..c883f0d67c4 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -260,6 +260,10 @@ class Entry_section(Entry): def Pack(self, offset): """Pack all entries into the section""" self._PackEntries() + if self._sort: + self._SortEntries() + self._ExpandEntries() + return super().Pack(offset)
def _PackEntries(self): @@ -290,9 +294,6 @@ class Entry_section(Entry):
def CheckEntries(self): """Check that entries do not overlap or extend outside the section""" - if self._sort: - self._SortEntries() - self._ExpandEntries() offset = 0 prev_name = 'None' for entry in self._entries.values():

This is only used by entry_Section and that class already calls it. Avoid calling it twice. Also drop it from the documentation.
Signed-off-by: Simon Glass sjg@chromium.org ---
tools/binman/README | 21 ++++++++++----------- tools/binman/control.py | 1 - 2 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/tools/binman/README b/tools/binman/README index 0dee71d1b22..c7c787c99fd 100644 --- a/tools/binman/README +++ b/tools/binman/README @@ -712,41 +712,40 @@ size of an entry. The 'current' image offset is passed in, and the function returns the offset immediately after the entry being packed. The default implementation of Pack() is usually sufficient.
-6. CheckSize() - checks that the contents of all the entries fits within -the image size. If the image does not have a defined size, the size is set -large enough to hold all the entries. +Note: for sections, this also sets the size of the entry to be large enough +for all entries it contains.
-7. CheckEntries() - checks that the entries do not overlap, nor extend +6. CheckEntries() - checks that the entries do not overlap, nor extend outside the image.
-8. SetImagePos() - sets the image position of every entry. This is the absolute +7. SetImagePos() - sets the image position of every entry. This is the absolute position 'image-pos', as opposed to 'offset' which is relative to the containing section. This must be done after all offsets are known, which is why it is quite late in the ordering.
-9. SetCalculatedProperties() - update any calculated properties in the device +8. SetCalculatedProperties() - update any calculated properties in the device tree. This sets the correct 'offset' and 'size' vaues, for example.
-10. ProcessEntryContents() - this calls Entry.ProcessContents() on each entry. +9. ProcessEntryContents() - this calls Entry.ProcessContents() on each entry. The default implementatoin does nothing. This can be overriden to adjust the contents of an entry in some way. For example, it would be possible to create an entry containing a hash of the contents of some other entries. At this stage the offset and size of entries should not be adjusted unless absolutely necessary, since it requires a repack (going back to PackEntries()).
-11. ResetForPack() - if the ProcessEntryContents() step failed, in that an entry +10. ResetForPack() - if the ProcessEntryContents() step failed, in that an entry has changed its size, then there is no alternative but to go back to step 5 and try again, repacking the entries with the updated size. ResetForPack() removes the fixed offset/size values added by binman, so that the packing can start from scratch.
-12. WriteSymbols() - write the value of symbols into the U-Boot SPL binary. +11. WriteSymbols() - write the value of symbols into the U-Boot SPL binary. See 'Access to binman entry offsets at run time' below for a description of what happens in this stage.
-13. BuildImage() - builds the image and writes it to a file +12. BuildImage() - builds the image and writes it to a file
-14. WriteMap() - writes a text file containing a map of the image. This is the +13. WriteMap() - writes a text file containing a map of the image. This is the final step.
diff --git a/tools/binman/control.py b/tools/binman/control.py index 26f1cf462ec..9eeac5db995 100644 --- a/tools/binman/control.py +++ b/tools/binman/control.py @@ -513,7 +513,6 @@ def ProcessImage(image, update_fdt, write_map, get_contents=True, for pack_pass in range(passes): try: image.PackEntries() - image.CheckSize() image.CheckEntries() except Exception as e: if write_map:

At present CheckSize() is called from the function that packs the entries. Move it up to the main Pack() function so that _PackEntries() can just do the packing.
Signed-off-by: Simon Glass sjg@chromium.org ---
tools/binman/etype/section.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index c883f0d67c4..f93469a170b 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -264,6 +264,9 @@ class Entry_section(Entry): self._SortEntries() self._ExpandEntries()
+ size = self.CheckSize() + self.size = size + return super().Pack(offset)
def _PackEntries(self): @@ -271,7 +274,7 @@ class Entry_section(Entry): offset = self._skip_at_start for entry in self._entries.values(): offset = entry.Pack(offset) - self.size = self.CheckSize() + return offset
def _ExpandEntries(self): """Expand any entries that are permitted to"""

This method introduces a separation between packing and checking that is different for sections. In order to handle compression properly, we need to be able to deal with a section's size being smaller than the uncompressed size of its contents. It is easier to make this work if everything happens in the Pack() method.
The only real user of CheckEntries() is entry_Section and it can call it directly. Drop the call from 'control' and handle it locally.
Signed-off-by: Simon Glass sjg@chromium.org ---
tools/binman/README | 22 ++++++++++------------ tools/binman/control.py | 1 - tools/binman/etype/section.py | 4 +++- 3 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/tools/binman/README b/tools/binman/README index c7c787c99fd..c14cee5d115 100644 --- a/tools/binman/README +++ b/tools/binman/README @@ -712,40 +712,38 @@ size of an entry. The 'current' image offset is passed in, and the function returns the offset immediately after the entry being packed. The default implementation of Pack() is usually sufficient.
-Note: for sections, this also sets the size of the entry to be large enough -for all entries it contains. +Note: for sections, this also checks that the entries do not overlap, nor extend +outside the section. If the section does not have a defined size, the size is +set large enough to hold all the entries.
-6. CheckEntries() - checks that the entries do not overlap, nor extend -outside the image. - -7. SetImagePos() - sets the image position of every entry. This is the absolute +6. SetImagePos() - sets the image position of every entry. This is the absolute position 'image-pos', as opposed to 'offset' which is relative to the containing section. This must be done after all offsets are known, which is why it is quite late in the ordering.
-8. SetCalculatedProperties() - update any calculated properties in the device +7. SetCalculatedProperties() - update any calculated properties in the device tree. This sets the correct 'offset' and 'size' vaues, for example.
-9. ProcessEntryContents() - this calls Entry.ProcessContents() on each entry. +8. ProcessEntryContents() - this calls Entry.ProcessContents() on each entry. The default implementatoin does nothing. This can be overriden to adjust the contents of an entry in some way. For example, it would be possible to create an entry containing a hash of the contents of some other entries. At this stage the offset and size of entries should not be adjusted unless absolutely necessary, since it requires a repack (going back to PackEntries()).
-10. ResetForPack() - if the ProcessEntryContents() step failed, in that an entry +9. ResetForPack() - if the ProcessEntryContents() step failed, in that an entry has changed its size, then there is no alternative but to go back to step 5 and try again, repacking the entries with the updated size. ResetForPack() removes the fixed offset/size values added by binman, so that the packing can start from scratch.
-11. WriteSymbols() - write the value of symbols into the U-Boot SPL binary. +10. WriteSymbols() - write the value of symbols into the U-Boot SPL binary. See 'Access to binman entry offsets at run time' below for a description of what happens in this stage.
-12. BuildImage() - builds the image and writes it to a file +11. BuildImage() - builds the image and writes it to a file
-13. WriteMap() - writes a text file containing a map of the image. This is the +12. WriteMap() - writes a text file containing a map of the image. This is the final step.
diff --git a/tools/binman/control.py b/tools/binman/control.py index 9eeac5db995..072417f3644 100644 --- a/tools/binman/control.py +++ b/tools/binman/control.py @@ -513,7 +513,6 @@ def ProcessImage(image, update_fdt, write_map, get_contents=True, for pack_pass in range(passes): try: image.PackEntries() - image.CheckEntries() except Exception as e: if write_map: fname = image.WriteMap() diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index f93469a170b..1618bebe4be 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -267,7 +267,9 @@ class Entry_section(Entry): size = self.CheckSize() self.size = size
- return super().Pack(offset) + offset = super().Pack(offset) + self.CheckEntries() + return offset
def _PackEntries(self): """Pack all entries into the section"""

At present this function assumes that the size of a section is at least as large as its contents. With compression this is often not the case. Relax this constraint by using the uncompressed size, if available.
Signed-off-by: Simon Glass sjg@chromium.org ---
tools/binman/etype/section.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index 1618bebe4be..b146239b779 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -299,19 +299,21 @@ class Entry_section(Entry):
def CheckEntries(self): """Check that entries do not overlap or extend outside the section""" + max_size = self.size if self.uncomp_size is None else self.uncomp_size + offset = 0 prev_name = 'None' for entry in self._entries.values(): entry.CheckEntries() if (entry.offset < self._skip_at_start or entry.offset + entry.size > self._skip_at_start + - self.size): + max_size): entry.Raise('Offset %#x (%d) size %#x (%d) is outside the ' "section '%s' starting at %#x (%d) " 'of size %#x (%d)' % (entry.offset, entry.offset, entry.size, entry.size, self._node.path, self._skip_at_start, - self._skip_at_start, self.size, self.size)) + self._skip_at_start, max_size, max_size)) if entry.offset < offset and entry.size: entry.Raise("Offset %#x (%d) overlaps with previous entry '%s' " "ending at %#x (%d)" %

At present this function adds up the total size of entries to work out the size of a section's contents. With compression this is no longer enough.
We may as well bite the bullet and build the section contents instead. Call _BuildSectionData() to get the (possibly compressed) contents and GetPaddedData() to get the same but with padding added.
Note that this is inefficient since the section contents is calculated twice. Future work will improve this.
This affects testPackOverlapMap() since the error is reported with a different section size now (enough to hold the contents). Update that at the same time.
Signed-off-by: Simon Glass sjg@chromium.org ---
tools/binman/etype/section.py | 11 ++++------- tools/binman/ftest.py | 2 +- 2 files changed, 5 insertions(+), 8 deletions(-)
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index b146239b779..570dbfcfd41 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -544,16 +544,13 @@ class Entry_section(Entry):
def CheckSize(self): - """Check that the section contents does not exceed its size, etc.""" - contents_size = 0 - for entry in self._entries.values(): - contents_size = max(contents_size, entry.offset + entry.size) - - contents_size -= self._skip_at_start + data = self._BuildSectionData() + contents_size = len(data)
size = self.size if not size: - size = self.pad_before + contents_size + self.pad_after + data = self.GetPaddedData() + size = len(data) size = tools.Align(size, self.align_size)
if self.size and contents_size > self.size: diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py index bcd4482717a..8b7537e4eca 100644 --- a/tools/binman/ftest.py +++ b/tools/binman/ftest.py @@ -2022,7 +2022,7 @@ class TestFunctional(unittest.TestCase): self.assertTrue(os.path.exists(map_fname)) map_data = tools.ReadFile(map_fname, binary=False) self.assertEqual('''ImagePos Offset Size Name -<none> 00000000 00000007 main-section +<none> 00000000 00000008 main-section <none> 00000000 00000004 u-boot <none> 00000003 00000004 u-boot-align ''', map_data)

With the previous changes, it is now possible to compress entire sections. Add some tests to check that compression works correctly, including updating the metadata.
Also update the documentation.
Signed-off-by: Simon Glass sjg@chromium.org ---
tools/binman/README | 8 + tools/binman/ftest.py | 217 ++++++++++++++++++ tools/binman/test/179_compress_image.dts | 14 ++ tools/binman/test/180_compress_image_less.dts | 14 ++ .../binman/test/181_compress_section_size.dts | 17 ++ tools/binman/test/182_compress_section.dts | 16 ++ tools/binman/test/183_compress_extra.dts | 37 +++ 7 files changed, 323 insertions(+) create mode 100644 tools/binman/test/179_compress_image.dts create mode 100644 tools/binman/test/180_compress_image_less.dts create mode 100644 tools/binman/test/181_compress_section_size.dts create mode 100644 tools/binman/test/182_compress_section.dts create mode 100644 tools/binman/test/183_compress_extra.dts
diff --git a/tools/binman/README b/tools/binman/README index c14cee5d115..de1eedfc3f7 100644 --- a/tools/binman/README +++ b/tools/binman/README @@ -853,6 +853,14 @@ The entry will then contain the compressed data, using the 'lz4' compression algorithm. Currently this is the only one that is supported. The uncompressed size is written to the node in an 'uncomp-size' property, if -u is used.
+Compression is also supported for sections. In that case the entire section is +compressed in one block, including all its contents. This means that accessing +an entry from the section required decompressing the entire section. Also, the +size of a section indicates the space that it consumes in its parent section +(and typically the image). With compression, the section may contain more data, +and the uncomp-size property indicates that, as above. The contents of the +section is compressed first, before any padding is added. This ensures that the +padding itself is not compressed, which would be a waste of time.
Map files diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py index 8b7537e4eca..403cd836b71 100644 --- a/tools/binman/ftest.py +++ b/tools/binman/ftest.py @@ -70,6 +70,7 @@ VBLOCK_DATA = b'vblk' FILES_DATA = (b"sorry I'm late\nOh, don't bother apologising, I'm " + b"sorry you're alive\n") COMPRESS_DATA = b'compress xxxxxxxxxxxxxxxxxxxxxx data' +COMPRESS_DATA_BIG = COMPRESS_DATA * 2 REFCODE_DATA = b'refcode' FSP_M_DATA = b'fsp_m' FSP_S_DATA = b'fsp_s' @@ -174,6 +175,7 @@ class TestFunctional(unittest.TestCase): os.path.join(cls._indir, 'files'))
TestFunctional._MakeInputFile('compress', COMPRESS_DATA) + TestFunctional._MakeInputFile('compress_big', COMPRESS_DATA_BIG) TestFunctional._MakeInputFile('bl31.bin', ATF_BL31_DATA)
# Add a few .dtb files for testing @@ -3857,6 +3859,221 @@ class TestFunctional(unittest.TestCase): tools.GetBytes(ord('!'), 4)) # padding to section size self.assertEqual(expected, data)
+ def testCompressImage(self): + """Test compression of the entire image""" + self._CheckLz4() + data, _, _, out_dtb_fname = self._DoReadFileDtb( + '179_compress_image.dts', use_real_dtb=True, update_dtb=True) + dtb = fdt.Fdt(out_dtb_fname) + dtb.Scan() + props = self._GetPropTree(dtb, ['offset', 'image-pos', 'size', + 'uncomp-size']) + orig = self._decompress(data) + self.assertEquals(COMPRESS_DATA + U_BOOT_DATA, orig) + + # Do a sanity check on various fields + image = control.images['image'] + entries = image.GetEntries() + self.assertEqual(2, len(entries)) + + entry = entries['blob'] + self.assertEqual(COMPRESS_DATA, entry.data) + self.assertEqual(len(COMPRESS_DATA), entry.size) + + entry = entries['u-boot'] + self.assertEqual(U_BOOT_DATA, entry.data) + self.assertEqual(len(U_BOOT_DATA), entry.size) + + self.assertEqual(len(data), image.size) + self.assertEqual(COMPRESS_DATA + U_BOOT_DATA, image.uncomp_data) + self.assertEqual(len(COMPRESS_DATA + U_BOOT_DATA), image.uncomp_size) + orig = self._decompress(image.data) + self.assertEqual(orig, image.uncomp_data) + + expected = { + 'blob:offset': 0, + 'blob:size': len(COMPRESS_DATA), + 'u-boot:offset': len(COMPRESS_DATA), + 'u-boot:size': len(U_BOOT_DATA), + 'uncomp-size': len(COMPRESS_DATA + U_BOOT_DATA), + 'offset': 0, + 'image-pos': 0, + 'size': len(data), + } + self.assertEqual(expected, props) + + def testCompressImageLess(self): + """Test compression where compression reduces the image size""" + self._CheckLz4() + data, _, _, out_dtb_fname = self._DoReadFileDtb( + '180_compress_image_less.dts', use_real_dtb=True, update_dtb=True) + dtb = fdt.Fdt(out_dtb_fname) + dtb.Scan() + props = self._GetPropTree(dtb, ['offset', 'image-pos', 'size', + 'uncomp-size']) + orig = self._decompress(data) + + self.assertEquals(COMPRESS_DATA + COMPRESS_DATA + U_BOOT_DATA, orig) + + # Do a sanity check on various fields + image = control.images['image'] + entries = image.GetEntries() + self.assertEqual(2, len(entries)) + + entry = entries['blob'] + self.assertEqual(COMPRESS_DATA_BIG, entry.data) + self.assertEqual(len(COMPRESS_DATA_BIG), entry.size) + + entry = entries['u-boot'] + self.assertEqual(U_BOOT_DATA, entry.data) + self.assertEqual(len(U_BOOT_DATA), entry.size) + + self.assertEqual(len(data), image.size) + self.assertEqual(COMPRESS_DATA_BIG + U_BOOT_DATA, image.uncomp_data) + self.assertEqual(len(COMPRESS_DATA_BIG + U_BOOT_DATA), + image.uncomp_size) + orig = self._decompress(image.data) + self.assertEqual(orig, image.uncomp_data) + + expected = { + 'blob:offset': 0, + 'blob:size': len(COMPRESS_DATA_BIG), + 'u-boot:offset': len(COMPRESS_DATA_BIG), + 'u-boot:size': len(U_BOOT_DATA), + 'uncomp-size': len(COMPRESS_DATA_BIG + U_BOOT_DATA), + 'offset': 0, + 'image-pos': 0, + 'size': len(data), + } + self.assertEqual(expected, props) + + def testCompressSectionSize(self): + """Test compression of a section with a fixed size""" + self._CheckLz4() + data, _, _, out_dtb_fname = self._DoReadFileDtb( + '181_compress_section_size.dts', use_real_dtb=True, update_dtb=True) + dtb = fdt.Fdt(out_dtb_fname) + dtb.Scan() + props = self._GetPropTree(dtb, ['offset', 'image-pos', 'size', + 'uncomp-size']) + orig = self._decompress(data) + self.assertEquals(COMPRESS_DATA + U_BOOT_DATA, orig) + expected = { + 'section/blob:offset': 0, + 'section/blob:size': len(COMPRESS_DATA), + 'section/u-boot:offset': len(COMPRESS_DATA), + 'section/u-boot:size': len(U_BOOT_DATA), + 'section:offset': 0, + 'section:image-pos': 0, + 'section:uncomp-size': len(COMPRESS_DATA + 
U_BOOT_DATA), + 'section:size': 0x30, + 'offset': 0, + 'image-pos': 0, + 'size': 0x30, + } + self.assertEqual(expected, props) + + def testCompressSection(self): + """Test compression of a section with no fixed size""" + self._CheckLz4() + data, _, _, out_dtb_fname = self._DoReadFileDtb( + '182_compress_section.dts', use_real_dtb=True, update_dtb=True) + dtb = fdt.Fdt(out_dtb_fname) + dtb.Scan() + props = self._GetPropTree(dtb, ['offset', 'image-pos', 'size', + 'uncomp-size']) + orig = self._decompress(data) + self.assertEquals(COMPRESS_DATA + U_BOOT_DATA, orig) + expected = { + 'section/blob:offset': 0, + 'section/blob:size': len(COMPRESS_DATA), + 'section/u-boot:offset': len(COMPRESS_DATA), + 'section/u-boot:size': len(U_BOOT_DATA), + 'section:offset': 0, + 'section:image-pos': 0, + 'section:uncomp-size': len(COMPRESS_DATA + U_BOOT_DATA), + 'section:size': len(data), + 'offset': 0, + 'image-pos': 0, + 'size': len(data), + } + self.assertEqual(expected, props) + + def testCompressExtra(self): + """Test compression of a section with no fixed size""" + self._CheckLz4() + data, _, _, out_dtb_fname = self._DoReadFileDtb( + '183_compress_extra.dts', use_real_dtb=True, update_dtb=True) + dtb = fdt.Fdt(out_dtb_fname) + dtb.Scan() + props = self._GetPropTree(dtb, ['offset', 'image-pos', 'size', + 'uncomp-size']) + + base = data[len(U_BOOT_DATA):] + self.assertEquals(U_BOOT_DATA, base[:len(U_BOOT_DATA)]) + rest = base[len(U_BOOT_DATA):] + + # Check compressed data + section1 = self._decompress(rest) + expect1 = tools.Compress(COMPRESS_DATA + U_BOOT_DATA, 'lz4') + self.assertEquals(expect1, rest[:len(expect1)]) + self.assertEquals(COMPRESS_DATA + U_BOOT_DATA, section1) + rest1 = rest[len(expect1):] + + section2 = self._decompress(rest1) + expect2 = tools.Compress(COMPRESS_DATA + COMPRESS_DATA, 'lz4') + self.assertEquals(expect2, rest1[:len(expect2)]) + self.assertEquals(COMPRESS_DATA + COMPRESS_DATA, section2) + rest2 = rest1[len(expect2):] + + expect_size = (len(U_BOOT_DATA) + len(U_BOOT_DATA) + len(expect1) + + len(expect2) + len(U_BOOT_DATA)) + #self.assertEquals(expect_size, len(data)) + + #self.assertEquals(U_BOOT_DATA, rest2) + + self.maxDiff = None + expected = { + 'u-boot:offset': 0, + 'u-boot:image-pos': 0, + 'u-boot:size': len(U_BOOT_DATA), + + 'base:offset': len(U_BOOT_DATA), + 'base:image-pos': len(U_BOOT_DATA), + 'base:size': len(data) - len(U_BOOT_DATA), + 'base/u-boot:offset': 0, + 'base/u-boot:image-pos': len(U_BOOT_DATA), + 'base/u-boot:size': len(U_BOOT_DATA), + 'base/u-boot2:offset': len(U_BOOT_DATA) + len(expect1) + + len(expect2), + 'base/u-boot2:image-pos': len(U_BOOT_DATA) * 2 + len(expect1) + + len(expect2), + 'base/u-boot2:size': len(U_BOOT_DATA), + + 'base/section:offset': len(U_BOOT_DATA), + 'base/section:image-pos': len(U_BOOT_DATA) * 2, + 'base/section:size': len(expect1), + 'base/section:uncomp-size': len(COMPRESS_DATA + U_BOOT_DATA), + 'base/section/blob:offset': 0, + 'base/section/blob:size': len(COMPRESS_DATA), + 'base/section/u-boot:offset': len(COMPRESS_DATA), + 'base/section/u-boot:size': len(U_BOOT_DATA), + + 'base/section2:offset': len(U_BOOT_DATA) + len(expect1), + 'base/section2:image-pos': len(U_BOOT_DATA) * 2 + len(expect1), + 'base/section2:size': len(expect2), + 'base/section2:uncomp-size': len(COMPRESS_DATA + COMPRESS_DATA), + 'base/section2/blob:offset': 0, + 'base/section2/blob:size': len(COMPRESS_DATA), + 'base/section2/blob2:offset': len(COMPRESS_DATA), + 'base/section2/blob2:size': len(COMPRESS_DATA), + + 'offset': 0, + 'image-pos': 0, + 'size': 
len(data), + } + self.assertEqual(expected, props) +
if __name__ == "__main__": unittest.main() diff --git a/tools/binman/test/179_compress_image.dts b/tools/binman/test/179_compress_image.dts new file mode 100644 index 00000000000..4176b7f2e62 --- /dev/null +++ b/tools/binman/test/179_compress_image.dts @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0+ +/dts-v1/; + +/ { + binman { + compress = "lz4"; + blob { + filename = "compress"; + }; + + u-boot { + }; + }; +}; diff --git a/tools/binman/test/180_compress_image_less.dts b/tools/binman/test/180_compress_image_less.dts new file mode 100644 index 00000000000..1d9d57b78c9 --- /dev/null +++ b/tools/binman/test/180_compress_image_less.dts @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0+ +/dts-v1/; + +/ { + binman { + compress = "lz4"; + blob { + filename = "compress_big"; + }; + + u-boot { + }; + }; +}; diff --git a/tools/binman/test/181_compress_section_size.dts b/tools/binman/test/181_compress_section_size.dts new file mode 100644 index 00000000000..95ed30add1a --- /dev/null +++ b/tools/binman/test/181_compress_section_size.dts @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0+ +/dts-v1/; + +/ { + binman { + section { + size = <0x30>; + compress = "lz4"; + blob { + filename = "compress"; + }; + + u-boot { + }; + }; + }; +}; diff --git a/tools/binman/test/182_compress_section.dts b/tools/binman/test/182_compress_section.dts new file mode 100644 index 00000000000..dc3e340c5d6 --- /dev/null +++ b/tools/binman/test/182_compress_section.dts @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0+ +/dts-v1/; + +/ { + binman { + section { + compress = "lz4"; + blob { + filename = "compress"; + }; + + u-boot { + }; + }; + }; +}; diff --git a/tools/binman/test/183_compress_extra.dts b/tools/binman/test/183_compress_extra.dts new file mode 100644 index 00000000000..59aae822638 --- /dev/null +++ b/tools/binman/test/183_compress_extra.dts @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0+ +/dts-v1/; + +/ { + binman { + u-boot { + }; + base { + type = "section"; + u-boot { + }; + section { + compress = "lz4"; + blob { + filename = "compress"; + }; + + u-boot { + }; + }; + section2 { + type = "section"; + compress = "lz4"; + blob { + filename = "compress"; + }; + blob2 { + type = "blob"; + filename = "compress"; + }; + }; + u-boot2 { + type = "u-boot"; + }; + }; + }; +};
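The checks in these tests follow one round-trip pattern: compress the expected bytes with the same algorithm, confirm that the image contains exactly that compressed blob, then decompress it and compare against the original. Below is a minimal sketch of that pattern, not part of the patch; the compress/decompress callables stand in for the tools.Compress() helper and the test's _decompress() wrapper so that no import path is assumed:

    # Minimal sketch of the round-trip check used by the tests above.
    # 'compress' and 'decompress' are stand-ins for binman's
    # tools.Compress() and the test's _decompress() wrapper.
    def check_round_trip(image_data, expected, compress, decompress, algo='lz4'):
        comp = compress(expected, algo)
        # The compressed blob should appear verbatim in the image
        assert image_data[:len(comp)] == comp
        # Decompressing it should give back the uncompressed original
        assert decompress(comp) == expected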

Refactor the implementation slightly so that section data is not rebuilt when it is already available.
We still have GetData() set up to rebuild the section, since we don't currently track when things change in a way that might affect a section. For example, if a blob within a section is updated, the section must be rebuilt. Tracking such changes would be possible but is more complex, so it is left for another time.
Signed-off-by: Simon Glass sjg@chromium.org ---
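As an illustration of the tracking that is being deferred here, a cached section might invalidate its built data whenever a child changes, rather than rebuilding in GetData() every time. This is a hypothetical sketch only; CachedSection, mark_dirty() and the rest do not exist in binman:

    # Hypothetical sketch: cache the built (and possibly compressed) section
    # data and invalidate it when a child entry changes, instead of
    # rebuilding on every GetData() call. Purely illustrative.
    class CachedSection:
        def __init__(self, build_func):
            self._build = build_func   # callable that builds the section data
            self._cached = None        # last built data, or None if stale

        def mark_dirty(self):
            # Call this when any child entry's contents change
            self._cached = None

        def get_data(self):
            if self._cached is None:
                self._cached = self._build()
            return self._cached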
tools/binman/etype/section.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py index 570dbfcfd41..3dd5f58c4c2 100644 --- a/tools/binman/etype/section.py +++ b/tools/binman/etype/section.py @@ -147,7 +147,7 @@ class Entry_section(Entry): def ObtainContents(self): return self.GetEntryContents()
- def GetPaddedDataForEntry(self, entry): + def GetPaddedDataForEntry(self, entry, entry_data): """Get the data for an entry including any padding
Gets the entry data and uses the section pad-byte value to add padding @@ -170,7 +170,7 @@ class Entry_section(Entry): data += tools.GetBytes(self._pad_byte, entry.pad_before)
# Add in the actual entry data - data += entry.GetData() + data += entry_data
# Handle padding after the entry if entry.pad_after: @@ -197,7 +197,7 @@ class Entry_section(Entry): section_data = b''
for entry in self._entries.values(): - data = self.GetPaddedDataForEntry(entry) + data = self.GetPaddedDataForEntry(entry, entry.GetData()) # Handle empty space before the entry pad = (entry.offset or 0) - self._skip_at_start - len(section_data) if pad > 0: @@ -210,7 +210,7 @@ class Entry_section(Entry): (len(self._entries), len(section_data))) return self.CompressData(section_data)
- def GetPaddedData(self): + def GetPaddedData(self, data=None): """Get the data for a section including any padding
Gets the section data and uses the parent section's pad-byte value to @@ -225,7 +225,9 @@ class Entry_section(Entry): after it (bytes) """ section = self.section or self - return section.GetPaddedDataForEntry(self) + if data is None: + data = self.GetData() + return section.GetPaddedDataForEntry(self, data)
def GetData(self): """Get the contents of an entry @@ -264,8 +266,10 @@ class Entry_section(Entry): self._SortEntries() self._ExpandEntries()
- size = self.CheckSize() - self.size = size + data = self._BuildSectionData() + self.SetContents(data) + + self.CheckSize()
offset = super().Pack(offset) self.CheckEntries() @@ -542,14 +546,12 @@ class Entry_section(Entry): for name, info in offset_dict.items(): self._SetEntryOffsetSize(name, *info)
- def CheckSize(self): - data = self._BuildSectionData() - contents_size = len(data) + contents_size = len(self.data)
size = self.size if not size: - data = self.GetPaddedData() + data = self.GetPaddedData(self.data) size = len(data) size = tools.Align(size, self.align_size)
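The GetPaddedData(data=None) change above is an instance of a simple 'optional precomputed data' pattern: callers that already hold the built contents (here Pack(), via self.data) pass them in, while everyone else falls back to rebuilding. In isolation the pattern looks roughly like this; build and pad are stand-ins for _BuildSectionData() and the pad-byte handling, not binman functions:

    # Sketch of the optional-precomputed-data pattern behind
    # GetPaddedData(data=None): only rebuild when the caller did not
    # supply data it already has.
    def get_padded_data(build, pad, data=None):
        if data is None:
            data = build()   # expensive rebuild, avoided when possible
        return pad(data)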