author      Pali Rohár <pali@kernel.org>    2022-01-12 18:20:46 +0100
committer   Stefan Roese <sr@denx.de>       2022-01-14 11:39:16 +0100
commit      bdf8c9f219af279cbf9e037318ba44ef3bef1d6d (patch)
tree        dbdc177d0fbf829c7ec342ef59649f62c0f29097 /tools
parent      a2b1db41cf0d21e8fca56d403f0d7834afe3a3ec (diff)
tools: kwbimage: Enforce 128-bit boundary alignment only for Sheeva CPU
This alignment is required only on platforms based on the Sheeva CPU core, i.e. A370 and AXP. Now that the U-Boot build system correctly propagates LOAD_ADDRESS, there is no need to enforce 128-bit boundary alignment on platforms which do not need it.

Previously the alignment was required because the load address was implicitly rounded up to a 128-bit boundary, behaviour which the U-Boot build system both expected and misused. With LOAD_ADDRESS now set explicitly, there is no more guessing about the load address.

Signed-off-by: Pali Rohár <pali@kernel.org>
Reviewed-by: Stefan Roese <sr@denx.de>
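To make the size effect of the new rule concrete, here is a minimal standalone C sketch of the header-size rounding. The ALIGN() round-up macro and the cpu_sheeva flag mirror names that appear in tools/kwbimage.c; the align_headersz() helper and the example size in main() are illustrative assumptions, not code from the patch.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* Round x up to the next multiple of a (a power of two), matching the
 * semantics of the ALIGN() macro used by tools/kwbimage.c. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Hypothetical helper modelling the new header-size rounding: 128-bit
 * (16-byte) alignment only on Sheeva-based SoCs, 32-bit otherwise. */
static size_t align_headersz(size_t headersz, bool cpu_sheeva)
{
        if (cpu_sheeva)
                return ALIGN(headersz, 16);
        return ALIGN(headersz, 4);
}

int main(void)
{
        size_t headersz = 0x2c6;        /* made-up example size */

        printf("Sheeva (A370/AXP): 0x%zx\n", align_headersz(headersz, true));
        printf("other mvebu:       0x%zx\n", align_headersz(headersz, false));
        return 0;
}

With that example value the Sheeva path rounds the header up to 0x2d0, while other platforms only pad it to 0x2c8; those extra padding bytes are exactly what non-Sheeva images no longer have to carry.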
Diffstat (limited to 'tools')
-rw-r--r--  tools/kwbimage.c  |  12  ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/tools/kwbimage.c b/tools/kwbimage.c
index ce053a4a5a..7c2106006a 100644
--- a/tools/kwbimage.c
+++ b/tools/kwbimage.c
@@ -1101,8 +1101,10 @@ static size_t image_headersz_v1(int *hasext)
                                 return 0;
                         }
                         headersz = e->binary.loadaddr - base_addr;
-               } else {
+               } else if (cpu_sheeva) {
                         headersz = ALIGN(headersz, 16);
+               } else {
+                       headersz = ALIGN(headersz, 4);
                 }
 
                 headersz += ALIGN(s.st_size, 4) + sizeof(uint32_t);
@@ -1158,8 +1160,8 @@ static int add_binary_header_v1(uint8_t **cur, uint8_t **next_ext,
         *cur += (binarye->binary.nargs + 1) * sizeof(uint32_t);
 
         /*
-        * ARM executable code inside the BIN header on some mvebu platforms
-        * (e.g. A370, AXP) must always be aligned with the 128-bit boundary.
+        * ARM executable code inside the BIN header on platforms with Sheeva
+        * CPU (A370 and AXP) must always be aligned with the 128-bit boundary.
         * In the case when this code is not position independent (e.g. ARM
         * SPL), it must be placed at fixed load and execute address.
         * This requirement can be met by inserting dummy arguments into
@@ -1170,8 +1172,10 @@ static int add_binary_header_v1(uint8_t **cur, uint8_t **next_ext,
         offset = *cur - (uint8_t *)main_hdr;
         if (binarye->binary.loadaddr)
                 add_args = (binarye->binary.loadaddr - base_addr - offset) / sizeof(uint32_t);
-       else
+       else if (cpu_sheeva)
                 add_args = ((16 - offset % 16) % 16) / sizeof(uint32_t);
+       else
+               add_args = 0;
         if (add_args) {
                 *(args - 1) = cpu_to_le32(binarye->binary.nargs + add_args);
                 *cur += add_args * sizeof(uint32_t);
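The second and third hunks apply the same policy to the dummy arguments that pad the BIN header. The sketch below is a simplified standalone model of that arithmetic, assuming a hypothetical dummy_args() helper and made-up address/offset values; only the three-way decision mirrors the patched add_binary_header_v1() logic.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical helper modelling the padding decision: how many extra
 * 32-bit dummy arguments get inserted in front of the binary payload. */
static unsigned int dummy_args(uint32_t loadaddr, uint32_t base_addr,
                               uint32_t offset, bool cpu_sheeva)
{
        if (loadaddr)
                /* Pad up to an explicitly requested load address. */
                return (loadaddr - base_addr - offset) / sizeof(uint32_t);
        if (cpu_sheeva)
                /* Pad only to the next 128-bit (16-byte) boundary. */
                return ((16 - offset % 16) % 16) / sizeof(uint32_t);
        /* Other mvebu platforms: no padding at all. */
        return 0;
}

int main(void)
{
        /* Made-up example values, not taken from a real image. */
        uint32_t base_addr = 0x40000000;
        uint32_t offset = 0x5c8;        /* current offset inside the header */

        printf("fixed loadaddr 0x40000600: %u dummy args\n",
               dummy_args(0x40000600, base_addr, offset, true));
        printf("Sheeva, no loadaddr:       %u dummy args\n",
               dummy_args(0, base_addr, offset, true));
        printf("non-Sheeva, no loadaddr:   %u dummy args\n",
               dummy_args(0, base_addr, offset, false));
        return 0;
}

This prints 14, 2 and 0 dummy arguments respectively: an explicit load address always wins, Sheeva platforms pad only to the next 16-byte boundary, and all other platforms now skip the padding entirely, which is the behavioural change of this patch.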