[U-Boot-Users] [PATCH 1/4] Add ALIGN() macro

ALIGN() returns the smallest aligned value greater than or equal to the passed-in address or size. Taken from Linux.
Signed-off-by: Andy Fleming <afleming@freescale.com>
---
 include/common.h |    3 +++
 1 files changed, 3 insertions(+), 0 deletions(-)

diff --git a/include/common.h b/include/common.h
index d0f5704..68e0cbc 100644
--- a/include/common.h
+++ b/include/common.h
@@ -671,6 +671,9 @@ void __attribute__((weak)) show_boot_progress (int val);
 #define DIV_ROUND_UP(n,d)	(((n) + (d) - 1) / (d))
 #define roundup(x, y)		((((x) + ((y) - 1)) / (y)) * (y))
 
+#define ALIGN(x,a)		__ALIGN_MASK(x,(typeof(x))(a)-1)
+#define __ALIGN_MASK(x,mask)	(((x)+(mask))&~(mask))
+
 /* Multicore arch functions */
 #ifdef CONFIG_MP
 int cpu_status(int nr);
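As a quick illustration of the new macro's arithmetic, here is an editor's sketch (not part of the patch; it needs GCC because typeof() is a GNU extension, and the values are arbitrary):

#include <stdio.h>

#define ALIGN(x,a)		__ALIGN_MASK(x,(typeof(x))(a)-1)
#define __ALIGN_MASK(x,mask)	(((x)+(mask))&~(mask))

int main(void)
{
	unsigned long sz = 0x1234;

	/* rounds up to the next 4 KiB boundary ... */
	printf("0x%lx\n", ALIGN(sz, 0x1000));		/* prints 0x2000 */

	/* ... and leaves an already-aligned value unchanged */
	printf("0x%lx\n", ALIGN(0x3000UL, 0x1000));	/* prints 0x3000 */

	return 0;
}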

lmb_free() lets us unreserve memory, so that lmb_alloc_base() or lmb_reserve() can be used to reserve memory only temporarily.
Signed-off-by: Andy Fleming <afleming@freescale.com>
---
 lib_generic/lmb.c |   49 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 49 insertions(+), 0 deletions(-)

diff --git a/lib_generic/lmb.c b/lib_generic/lmb.c
index 3b8c805..a34e2d3 100644
--- a/lib_generic/lmb.c
+++ b/lib_generic/lmb.c
@@ -180,6 +180,55 @@ long lmb_add(struct lmb *lmb, ulong base, ulong size)
 	return lmb_add_region(_rgn, base, size);
 }
+long lmb_free(struct lmb *lmb, u64 base, u64 size)
+{
+	struct lmb_region *rgn = &(lmb->reserved);
+	u64 rgnbegin, rgnend;
+	u64 end = base + size;
+	int i;
+
+	rgnbegin = rgnend = 0; /* suppress gcc warnings */
+
+	/* Find the region where (base, size) belongs to */
+	for (i=0; i < rgn->cnt; i++) {
+		rgnbegin = rgn->region[i].base;
+		rgnend = rgnbegin + rgn->region[i].size;
+
+		if ((rgnbegin <= base) && (end <= rgnend))
+			break;
+	}
+
+	/* Didn't find the region */
+	if (i == rgn->cnt)
+		return -1;
+
+	/* Check to see if we are removing entire region */
+	if ((rgnbegin == base) && (rgnend == end)) {
+		lmb_remove_region(rgn, i);
+		return 0;
+	}
+
+	/* Check to see if region is matching at the front */
+	if (rgnbegin == base) {
+		rgn->region[i].base = end;
+		rgn->region[i].size -= size;
+		return 0;
+	}
+
+	/* Check to see if the region is matching at the end */
+	if (rgnend == end) {
+		rgn->region[i].size -= size;
+		return 0;
+	}
+
+	/*
+	 * We need to split the entry - adjust the current one to the
+	 * beginning of the hole and add the region after hole.
+	 */
+	rgn->region[i].size = base - rgn->region[i].base;
+	return lmb_add_region(rgn, end, rgnend - end);
+}
+
 long lmb_reserve(struct lmb *lmb, ulong base, ulong size)
 {
 	struct lmb_region *_rgn = &(lmb->reserved);
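A rough usage sketch of how the new call pairs with the existing allocator (editor's illustration, not from the patch; the function name, size, alignment and limit are made up):

/*
 * Temporarily reserve a 16 KiB scratch area below 'limit', use it,
 * then hand the range back so later allocations can reuse it.
 */
static void example_temporary_reservation(struct lmb *lmb, ulong limit)
{
	ulong buf;

	buf = lmb_alloc_base(lmb, 0x4000, 0x1000, limit);
	if (buf == 0)
		return;		/* no free space */

	/* ... use [buf, buf + 0x4000) ... */

	/* undo the reservation made by lmb_alloc_base() */
	lmb_free(lmb, buf, 0x4000);
}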

__lmb_alloc_base() can underflow if it fails to find free space. This was fixed in Linux by commit d9024df02ffe74d723d97d552f86de3b34beb8cc. This patch merely updates __lmb_alloc_base() to resemble the current Linux version.
Signed-off-by: Andy Fleming <afleming@freescale.com>
---
 lib_generic/lmb.c |   34 +++++++++++++++++++---------------
 1 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/lib_generic/lmb.c b/lib_generic/lmb.c
index a34e2d3..c27a540 100644
--- a/lib_generic/lmb.c
+++ b/lib_generic/lmb.c
@@ -284,11 +284,14 @@ ulong __lmb_alloc_base(struct lmb *lmb, ulong size, ulong align, ulong max_addr)
 {
 	long i, j;
 	ulong base = 0;
+	ulong res_base;
 
 	for (i = lmb->memory.cnt-1; i >= 0; i--) {
 		ulong lmbbase = lmb->memory.region[i].base;
 		ulong lmbsize = lmb->memory.region[i].size;
 
+		if (lmbsize < size)
+			continue;
 		if (max_addr == LMB_ALLOC_ANYWHERE)
 			base = lmb_align_down(lmbbase + lmbsize - size, align);
 		else if (lmbbase < max_addr) {
@@ -297,22 +300,23 @@ ulong __lmb_alloc_base(struct lmb *lmb, ulong size, ulong align, ulong max_addr)
 		} else
 			continue;
 
-		while ((lmbbase <= base) &&
-		       ((j = lmb_overlaps_region(&(lmb->reserved), base, size)) >= 0) )
-			base = lmb_align_down(lmb->reserved.region[j].base - size,
-					      align);
-
-		if ((base != 0) && (lmbbase <= base))
-			break;
+		while (base && lmbbase <= base) {
+			j = lmb_overlaps_region(&lmb->reserved, base, size);
+			if (j < 0) {
+				/* This area isn't reserved, take it */
+				if (lmb_add_region(&lmb->reserved, base,
+						lmb_align_up(size,
+						align)) < 0)
+					return 0;
+				return base;
+			}
+			res_base = lmb->reserved.region[j].base;
+			if (res_base < size)
+				break;
+			base = lmb_align_down(res_base - size, align);
+		}
 	}
-
-	if (i < 0)
-		return 0;
-
-	if (lmb_add_region(&(lmb->reserved), base, lmb_align_up(size, align)) < 0)
-		return 0;
-
-	return base;
+	return 0;
 }
int lmb_is_reserved(struct lmb *lmb, ulong addr)
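To see why the old loop could go wrong: it computed the next candidate base by subtracting the requested size from a reserved region's base without first checking that the base is at least that large. An editor's demonstration of the resulting unsigned wrap-around, with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long res_base = 0x1000;	/* reserved region low in memory */
	unsigned long size = 0x4000;		/* request larger than res_base */

	/* the old code did this unconditionally: */
	unsigned long candidate = res_base - size;	/* wraps around */

	/* prints a huge bogus address (0xffffd000 with 32-bit longs),
	 * which can still pass the "lmbbase <= base" check */
	printf("candidate = 0x%lx\n", candidate);

	/* the reworked loop breaks out instead */
	if (res_base < size)
		printf("new code: no room below this region, give up\n");

	return 0;
}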

Current code requires that a compiled device tree have space added to the end to leave room for the extra nodes added by board code (and the chosen node). This requires that device tree creators anticipate how much space U-Boot will add to the tree, which is absurd. Ideally, the code would resize and/or relocate the tree whenever it needed more space, but that would require a systemic, non-trivial change to the fdt code. Instead, we resize the tree inside boot_relocate_fdt(), either reserving the remainder of the bootmap for it (when the fdt is inside the bootmap) or adding CFG_FDT_PAD bytes to its size.
Signed-off-by: Andy Fleming <afleming@freescale.com>
---
 lib_ppc/bootm.c |   66 ++++++++++++++++++++++++++++++++++++++++++++++++------
 1 files changed, 58 insertions(+), 8 deletions(-)

diff --git a/lib_ppc/bootm.c b/lib_ppc/bootm.c
index 9194fd8..85e959a 100644
--- a/lib_ppc/bootm.c
+++ b/lib_ppc/bootm.c
@@ -51,6 +51,10 @@ static int boot_relocate_fdt (struct lmb *lmb, ulong bootmap_base,
 #include <asm/cache.h>
 #endif
 
+#ifndef CFG_FDT_PAD
+#define CFG_FDT_PAD 0x3000
+#endif
+
 DECLARE_GLOBAL_DATA_PTR;
 
 extern int do_reset (cmd_tbl_t *cmdtp, int flag, int argc, char *argv[]);
@@ -189,6 +193,44 @@ do_bootm_linux(cmd_tbl_t *cmdtp, int flag, int argc, char *argv[],
 			ft_board_setup(of_flat_tree, gd->bd);
 #endif
 	}
+
+	/* Fixup the fdt memreserve now that we know how big it is */
+	if (of_flat_tree) {
+		int j;
+		uint64_t addr, size;
+		int total = fdt_num_mem_rsv(of_flat_tree);
+		uint actualsize;
+
+		for (j = 0; j < total; j++) {
+			fdt_get_mem_rsv(of_flat_tree, j, &addr, &size);
+			if (addr == (uint64_t)of_flat_tree) {
+				fdt_del_mem_rsv(of_flat_tree, j);
+				break;
+			}
+		}
+
+		/* Delete the old LMB reservation */
+		lmb_free(lmb, of_flat_tree, fdt_totalsize(of_flat_tree));
+
+		/* Calculate the actual size of the fdt */
+		actualsize = fdt_off_dt_strings(of_flat_tree) +
+			fdt_size_dt_strings(of_flat_tree);
+
+		/* Make it so the fdt ends on a page boundary */
+		actualsize = ALIGN(actualsize, 0x1000);
+		actualsize = actualsize - ((uint)of_flat_tree & 0xfff);
+
+		/* Change the fdt header to reflect the correct size */
+		fdt_set_totalsize(of_flat_tree, actualsize);
+		of_size = actualsize;
+
+		/* Add the new reservation */
+		ret = fdt_add_mem_rsv(of_flat_tree, (uint)of_flat_tree,
+				of_size);
+
+		/* Create a new LMB reservation */
+		lmb_reserve(lmb, (ulong)of_flat_tree, of_size);
+	}
 #endif	/* CONFIG_OF_LIBFDT */
	ret = boot_ramdisk_high (lmb, rd_data_start, rd_len,
			&initrd_start, &initrd_end);
@@ -711,22 +753,25 @@ static int boot_relocate_fdt (struct lmb *lmb, ulong bootmap_base,
 #endif
 
 	/*
-	 * The blob must be within CFG_BOOTMAPSZ,
-	 * so we flag it to be copied if it is not.
+	 * The blob needs to be inside the boot mapping.
 	 */
-	if (fdt_blob >= (char *)CFG_BOOTMAPSZ)
+	if (fdt_blob < (char *)bootmap_base)
 		relocate = 1;
 
-	of_len = be32_to_cpu (fdt_totalsize (fdt_blob));
+	if ((fdt_blob + *of_size + CFG_FDT_PAD) >=
+			((char *)CFG_BOOTMAPSZ + bootmap_base))
+		relocate = 1;
 
 	/* move flattend device tree if needed */
 	if (relocate) {
 		int err;
-		ulong of_start;
+		ulong of_start = 0;
 
 		/* position on a 4K boundary before the alloc_current */
+		/* Make the fdt as large as possible */
+		of_len = *of_size + CFG_FDT_PAD;
 		of_start = lmb_alloc_base(lmb, of_len, 0x1000,
-				(CFG_BOOTMAPSZ + bootmap_base));
+					(CFG_BOOTMAPSZ + bootmap_base));
 
 		if (of_start == 0) {
 			puts("device tree - allocation error\n");
@@ -734,7 +779,7 @@ static int boot_relocate_fdt (struct lmb *lmb, ulong bootmap_base,
 		}
 
 		debug ("## device tree at 0x%08lX ... 0x%08lX (len=%ld=0x%lX)\n",
-			(ulong)fdt_blob, (ulong)fdt_blob + of_len - 1,
+			(ulong)fdt_blob, (ulong)fdt_blob + *of_size - 1,
 			of_len, of_len);
 
 		printf ("   Loading Device Tree to %08lx, end %08lx ... ",
@@ -748,9 +793,14 @@ static int boot_relocate_fdt (struct lmb *lmb, ulong bootmap_base,
 		puts ("OK\n");
 
 		*of_flat_tree = (char *)of_start;
+		*of_size = of_len;
 	} else {
 		*of_flat_tree = fdt_blob;
-		lmb_reserve(lmb, (ulong)fdt, of_len);
+		of_len = (CFG_BOOTMAPSZ + bootmap_base) - (ulong)fdt_blob;
+		lmb_reserve(lmb, (ulong)fdt_blob, of_len);
+		fdt_set_totalsize(*of_flat_tree, of_len);
+
+		*of_size = of_len;
 	}
return 0;
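To make the size-trimming arithmetic in the do_bootm_linux() hunk concrete, here is a worked example with made-up numbers (editor's illustration, not code from the patch; it relies on the ALIGN() macro from patch 1):

	/* Suppose the fdt sits at 0x00ff4800 (0x800 into a 4 KiB page)
	 * and its strings block ends 0x520 bytes into the blob. */
	uint fdt_addr   = 0x00ff4800;
	uint actualsize = 0x520;		/* off_dt_strings + size_dt_strings */

	actualsize = ALIGN(actualsize, 0x1000);	/* -> 0x1000 */
	actualsize -= fdt_addr & 0xfff;		/* -> 0x1000 - 0x800 = 0x800 */

	/* The trimmed blob now ends at 0x00ff4800 + 0x800 = 0x00ff5000,
	 * exactly on a page boundary, while still covering the 0x520
	 * bytes of real data. */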

On 15:24 Tue 20 May, Andy Fleming wrote:
lmb_free() lets us unreserve memory, so that lmb_alloc_base() or lmb_reserve() can be used to reserve memory only temporarily.
Signed-off-by: Andy Fleming afleming@freescale.com
lib_generic/lmb.c |   49 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 49 insertions(+), 0 deletions(-)
diff --git a/lib_generic/lmb.c b/lib_generic/lmb.c
index 3b8c805..a34e2d3 100644
--- a/lib_generic/lmb.c
+++ b/lib_generic/lmb.c
@@ -180,6 +180,55 @@ long lmb_add(struct lmb *lmb, ulong base, ulong size)
 	return lmb_add_region(_rgn, base, size);
 }
+long lmb_free(struct lmb *lmb, u64 base, u64 size)
+{
+	struct lmb_region *rgn = &(lmb->reserved);
+	u64 rgnbegin, rgnend;
+	u64 end = base + size;
+	int i;
+
+	rgnbegin = rgnend = 0; /* suppress gcc warnings */
+
+	/* Find the region where (base, size) belongs to */
+	for (i=0; i < rgn->cnt; i++) {

please i = 0

+		rgnbegin = rgn->region[i].base;
+		rgnend = rgnbegin + rgn->region[i].size;
+
+		if ((rgnbegin <= base) && (end <= rgnend))
+			break;
Best Regards, J.

In message 20080520211104.GC18756@game.jcrosoft.org you wrote:
+	for (i=0; i < rgn->cnt; i++) {
please i = 0
Andy, please leave as it was.
Jean-Christophe - this is really nitpicking, and in my opinion your version is more difficult to read.
Best regards,
Wolfgang Denk

On 15:24 Tue 20 May, Andy Fleming wrote:
ALIGN() returns the smallest aligned value greater than or equal to the passed-in address or size. Taken from Linux.
Signed-off-by: Andy Fleming afleming@freescale.com
include/common.h |    3 +++
 1 files changed, 3 insertions(+), 0 deletions(-)

diff --git a/include/common.h b/include/common.h
index d0f5704..68e0cbc 100644
--- a/include/common.h
+++ b/include/common.h
@@ -671,6 +671,9 @@ void __attribute__((weak)) show_boot_progress (int val);
 #define DIV_ROUND_UP(n,d)	(((n) + (d) - 1) / (d))
 #define roundup(x, y)		((((x) + ((y) - 1)) / (y)) * (y))
 
+#define ALIGN(x,a)		__ALIGN_MASK(x,(typeof(x))(a)-1)
+#define __ALIGN_MASK(x,mask)	(((x)+(mask))&~(mask))
please fix coding style and use tab instead of space for indent
#define ALIGN(x, a)		__ALIGN_MASK(x, (typeof(x)) (a) - 1)
#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))
Best Regards, J.

Jean-Christophe PLAGNIOL-VILLARD wrote:
On 15:24 Tue 20 May, Andy Fleming wrote:
ALIGN() returns the smallest aligned value greater than or equal to the passed-in address or size. Taken from Linux.
Signed-off-by: Andy Fleming afleming@freescale.com
include/common.h |    3 +++
 1 files changed, 3 insertions(+), 0 deletions(-)

diff --git a/include/common.h b/include/common.h
index d0f5704..68e0cbc 100644
--- a/include/common.h
+++ b/include/common.h
@@ -671,6 +671,9 @@ void __attribute__((weak)) show_boot_progress (int val);
 #define DIV_ROUND_UP(n,d)	(((n) + (d) - 1) / (d))
 #define roundup(x, y)		((((x) + ((y) - 1)) / (y)) * (y))
 
+#define ALIGN(x,a)		__ALIGN_MASK(x,(typeof(x))(a)-1)
+#define __ALIGN_MASK(x,mask)	(((x)+(mask))&~(mask))
please fix coding style and use tab instead of space for indent
#define ALIGN(x, a)		__ALIGN_MASK(x, (typeof(x)) (a) - 1)
#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))
That's not indenting, that's alignment. Using tabs after anything that is not a tab causes the code to look awful if the tab stops are disturbed, such as by viewing with a different tab size, or as part of a patch.
-Scott

In message 4833487F.1010502@freescale.com you wrote:
> #define ALIGN(x, a)		__ALIGN_MASK(x, (typeof(x)) (a) - 1)
> #define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))
>
> That's not indenting, that's alignment. Using tabs after anything that

OK. Then I will add this requirement to the Coding Style documentation now.

> is not a tab causes the code to look awful if the tab stops are
> disturbed, such as by viewing with a different tab size, or as part of
> a patch.
What do you mean by "a different tab size"? TABs are always 8 characters, always and everywhere.
Best regards,
Wolfgang Denk

In message 20080520210914.GB18756@game.jcrosoft.org you wrote:
+#define ALIGN(x,a)		__ALIGN_MASK(x,(typeof(x))(a)-1)
+#define __ALIGN_MASK(x,mask)	(((x)+(mask))&~(mask))
please fix coding style and use tab instead of space for indent
#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x)) (a) - 1)
Actually that should be
... __ALIGN_MASK((x), (typeof(x)) (a) - 1)
Best regards,
Wolfgang Denk
participants (4):
- Andy Fleming
- Jean-Christophe PLAGNIOL-VILLARD
- Scott Wood
- Wolfgang Denk