[RFC PATCH 0/4] lmb: Tweaks as discussed

This series adds a few tweaks to Sughosh's lmb series to show:

- save/restore of lmb state in tests
- putting lmb in a struct
- a small code-size improvement
Simon Glass (4):
  lmb: Put internal details in a struct
  lmb: Tighten up the code in lmb_add_region_flags()
  lmb: Split init into two functions
  lmb: Ensure that tests don't change lmb state

 common/board_r.c  |   4 +-
 include/lmb.h     |  53 +++++++------
 lib/lmb.c         | 193 +++++++++++++++++++---------------------------
 test/cmd/bdinfo.c |   8 +-
 test/lmb_ut.c     |  54 +++++++++----
 5 files changed, 154 insertions(+), 158 deletions(-)

[RFC PATCH 1/4] lmb: Put internal details in a struct

Rather than exporting the internal details of lmb, create a struct which holds the lists.
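
Code that used to declare the lists as extern can then do something like this instead (an illustrative sketch only, mirroring the test/cmd/bdinfo.c hunk below):

	struct lmb *lmb = lmb_get();

	/* the two region lists now live inside the struct */
	lmb_dump_region(&lmb->free_mem, "memory");
	lmb_dump_region(&lmb->used_mem, "reserved");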

Signed-off-by: Simon Glass <sjg@chromium.org>
---
 include/lmb.h     | 10 ++++++++-
 lib/lmb.c         | 56 +++++++++++++++++++++++++----------------------
 test/cmd/bdinfo.c |  8 +++----
 3 files changed, 43 insertions(+), 31 deletions(-)

diff --git a/include/lmb.h b/include/lmb.h
index 25a984e14f3..19231439c34 100644
--- a/include/lmb.h
+++ b/include/lmb.h
@@ -13,7 +13,13 @@
  * Copyright (C) 2001 Peter Bergner, IBM Corp.
  */
 
-struct alist;
+#include <alist.h>
+
+struct lmb {
+	struct alist free_mem;
+	struct alist used_mem;
+};
+
 
 /**
  * enum lmb_flags - definition of memory region attributes
@@ -121,6 +127,8 @@ int lmb_mem_regions_init(struct alist **mem_lst, struct alist **used_lst,
  */
 void lmb_mem_regions_uninit(struct alist *mem_lst, struct alist *used_lst);
 
+struct lmb *lmb_get(void);
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_LMB_H */
diff --git a/lib/lmb.c b/lib/lmb.c
index 97ea26b013f..c11ce308c5b 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -24,8 +24,7 @@ DECLARE_GLOBAL_DATA_PTR;
 #define LMB_ALLOC_ANYWHERE	0
 #define LMB_ALIST_INITIAL_SIZE	4
 
-struct alist lmb_free_mem;
-struct alist lmb_used_mem;
+static struct lmb lmb;
 
 static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
 {
@@ -50,8 +49,8 @@ static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
 void lmb_dump_all_force(void)
 {
 	printf("lmb_dump_all:\n");
-	lmb_dump_region(&lmb_free_mem, "memory");
-	lmb_dump_region(&lmb_used_mem, "reserved");
+	lmb_dump_region(&lmb.free_mem, "memory");
+	lmb_dump_region(&lmb.used_mem, "reserved");
 }
 
 void lmb_dump_all(void)
@@ -499,7 +498,7 @@ static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
 /* This routine may be called with relocation disabled. */
 long lmb_add(phys_addr_t base, phys_size_t size, uint flags)
 {
-	struct alist *lmb_rgn_lst = &lmb_free_mem;
+	struct alist *lmb_rgn_lst = &lmb.free_mem;
 
 	return lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
 }
@@ -507,7 +506,7 @@ long lmb_add(phys_addr_t base, phys_size_t size, uint flags)
 long lmb_free(phys_addr_t base, phys_size_t size, uint flags)
 {
 	struct lmb_region *rgn;
-	struct alist *lmb_rgn_lst = &lmb_used_mem;
+	struct alist *lmb_rgn_lst = &lmb.used_mem;
 	phys_addr_t rgnbegin, rgnend;
 	phys_addr_t end = base + size - 1;
 	int i;
@@ -557,7 +556,7 @@ long lmb_free(phys_addr_t base, phys_size_t size, uint flags)
 
 long lmb_reserve(phys_addr_t base, phys_size_t size, uint flags)
 {
-	struct alist *lmb_rgn_lst = &lmb_used_mem;
+	struct alist *lmb_rgn_lst = &lmb.used_mem;
 
 	return lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
 }
@@ -589,10 +588,10 @@ static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
 	long i, rgn;
 	phys_addr_t base = 0;
 	phys_addr_t res_base;
-	struct lmb_region *lmb_used = lmb_used_mem.data;
-	struct lmb_region *lmb_memory = lmb_free_mem.data;
+	struct lmb_region *lmb_used = lmb.used_mem.data;
+	struct lmb_region *lmb_memory = lmb.free_mem.data;
 
-	for (i = lmb_free_mem.count - 1; i >= 0; i--) {
+	for (i = lmb.free_mem.count - 1; i >= 0; i--) {
 		phys_addr_t lmbbase = lmb_memory[i].base;
 		phys_size_t lmbsize = lmb_memory[i].size;
 
@@ -610,10 +609,10 @@ static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
 			continue;
 
 		while (base && lmbbase <= base) {
-			rgn = lmb_overlaps_region(&lmb_used_mem, base, size);
+			rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
 			if (rgn < 0) {
 				/* This area isn't reserved, take it */
-				if (lmb_add_region_flags(&lmb_used_mem, base,
+				if (lmb_add_region_flags(&lmb.used_mem, base,
 							 size, flags) < 0)
 					return 0;
 				return base;
@@ -651,10 +650,10 @@ static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
 				    enum lmb_flags flags)
 {
 	long rgn;
-	struct lmb_region *lmb_memory = lmb_free_mem.data;
+	struct lmb_region *lmb_memory = lmb.free_mem.data;
 
 	/* Check if the requested address is in one of the memory regions */
-	rgn = lmb_overlaps_region(&lmb_free_mem, base, size);
+	rgn = lmb_overlaps_region(&lmb.free_mem, base, size);
 	if (rgn >= 0) {
 		/*
 		 * Check if the requested end address is in the same memory
@@ -686,13 +685,13 @@ phys_size_t lmb_get_free_size(phys_addr_t addr)
 {
 	int i;
 	long rgn;
-	struct lmb_region *lmb_used = lmb_used_mem.data;
-	struct lmb_region *lmb_memory = lmb_free_mem.data;
+	struct lmb_region *lmb_used = lmb.used_mem.data;
+	struct lmb_region *lmb_memory = lmb.free_mem.data;
 
 	/* check if the requested address is in the memory regions */
-	rgn = lmb_overlaps_region(&lmb_free_mem, addr, 1);
+	rgn = lmb_overlaps_region(&lmb.free_mem, addr, 1);
 	if (rgn >= 0) {
-		for (i = 0; i < lmb_used_mem.count; i++) {
+		for (i = 0; i < lmb.used_mem.count; i++) {
 			if (addr < lmb_used[i].base) {
 				/* first reserved range > requested address */
 				return lmb_used[i].base - addr;
@@ -704,8 +703,8 @@ phys_size_t lmb_get_free_size(phys_addr_t addr)
 			}
 		}
 		/* if we come here: no reserved ranges above requested addr */
-		return lmb_memory[lmb_free_mem.count - 1].base +
-		       lmb_memory[lmb_free_mem.count - 1].size - addr;
+		return lmb_memory[lmb.free_mem.count - 1].base +
+		       lmb_memory[lmb.free_mem.count - 1].size - addr;
 	}
 	return 0;
 }
@@ -713,9 +712,9 @@ phys_size_t lmb_get_free_size(phys_addr_t addr)
 int lmb_is_reserved_flags(phys_addr_t addr, int flags)
 {
 	int i;
-	struct lmb_region *lmb_used = lmb_used_mem.data;
+	struct lmb_region *lmb_used = lmb.used_mem.data;
 
-	for (i = 0; i < lmb_used_mem.count; i++) {
+	for (i = 0; i < lmb.used_mem.count; i++) {
 		phys_addr_t upper = lmb_used[i].base + lmb_used[i].size - 1;
 
 		if ((addr >= lmb_used[i].base) && (addr <= upper))
@@ -760,14 +759,14 @@ int lmb_mem_regions_init(struct alist **mem_lst, struct alist **used_lst,
 {
 	bool ret;
 
-	ret = alist_init(&lmb_free_mem, sizeof(struct lmb_region),
+	ret = alist_init(&lmb.free_mem, sizeof(struct lmb_region),
 			 (uint)LMB_ALIST_INITIAL_SIZE);
 	if (!ret) {
 		log_debug("Unable to initialise the list for LMB free memory\n");
 		return -1;
 	}
 
-	ret = alist_init(&lmb_used_mem, sizeof(struct lmb_region),
+	ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
 			 (uint)LMB_ALIST_INITIAL_SIZE);
 	if (!ret) {
 		log_debug("Unable to initialise the list for LMB used memory\n");
@@ -775,10 +774,10 @@ int lmb_mem_regions_init(struct alist **mem_lst, struct alist **used_lst,
 	}
 
 	if (mem_lst)
-		*mem_lst = &lmb_free_mem;
+		*mem_lst = &lmb.free_mem;
 
 	if (used_lst)
-		*used_lst = &lmb_used_mem;
+		*used_lst = &lmb.used_mem;
 
 	if (!add_rsv_mem)
 		return 0;
@@ -829,3 +828,8 @@ int initr_lmb(void)
 
 	return ret;
 }
+
+struct lmb *lmb_get(void)
+{
+	return &lmb;
+}
diff --git a/test/cmd/bdinfo.c b/test/cmd/bdinfo.c
index 3184aaf629f..e18e4701bbe 100644
--- a/test/cmd/bdinfo.c
+++ b/test/cmd/bdinfo.c
@@ -129,12 +129,12 @@ static int lmb_test_dump_region(struct unit_test_state *uts,
 
 static int lmb_test_dump_all(struct unit_test_state *uts)
 {
-	extern struct alist lmb_free_mem;
-	extern struct alist lmb_used_mem;
+	struct lmb *lmb = lmb_get();
 
+	ut_assertnonnull(lmb);
 	ut_assert_nextline("lmb_dump_all:");
-	ut_assertok(lmb_test_dump_region(uts, &lmb_free_mem, "memory"));
-	ut_assertok(lmb_test_dump_region(uts, &lmb_used_mem, "reserved"));
+	ut_assertok(lmb_test_dump_region(uts, &lmb->free_mem, "memory"));
+	ut_assertok(lmb_test_dump_region(uts, &lmb->used_mem, "reserved"));
 
 	return 0;
 }

[RFC PATCH 2/4] lmb: Tighten up the code in lmb_add_region_flags()

This function has more special cases than it needs. Simplify it to reduce code size and complexity.
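
As an example of why the dropped empty-list special case is redundant: with no entries the coalesce loop runs zero times, alist_add_placeholder() (assuming it bumps the count by one, as its name suggests) makes the count 1, and the insertion loop starts at i = 0, where the 'if (i && ...)' check fails, so the new region is written straight into rgn[0].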

Signed-off-by: Simon Glass <sjg@chromium.org>
---
 lib/lmb.c | 57 +++++++++++++++++++------------------------------
 1 file changed, 19 insertions(+), 38 deletions(-)

diff --git a/lib/lmb.c b/lib/lmb.c
index c11ce308c5b..83b060a2f4d 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -396,14 +396,6 @@ static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
 	if (alist_err(lmb_rgn_lst))
 		return -1;
 
-	if (alist_empty(lmb_rgn_lst)) {
-		rgn[0].base = base;
-		rgn[0].size = size;
-		rgn[0].flags = flags;
-		lmb_rgn_lst->count = 1;
-		return 0;
-	}
-
 	/* First try and coalesce this LMB with another. */
 	for (i = 0; i < lmb_rgn_lst->count; i++) {
 		phys_addr_t rgnbase = rgn[i].base;
@@ -448,50 +440,39 @@ static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
 		}
 	}
 
-	if (i < lmb_rgn_lst->count - 1 &&
-	    rgn[i].flags == rgn[i + 1].flags) {
-		if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
-			lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
-			coalesced++;
-		} else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
-			/* fix overlapping area */
-			lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
-			coalesced++;
+	if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
+		rgn = lmb_rgn_lst->data;
+		if (rgn[i].flags == rgn[i + 1].flags) {
+			if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
+				lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
+				coalesced++;
+			} else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
+				/* fix overlapping area */
+				lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
+				coalesced++;
+			}
 		}
 	}
 
 	if (coalesced)
 		return coalesced;
 
-	if (alist_full(lmb_rgn_lst)) {
-		if (!alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc * 2))
-			return -1;
-		else
-			rgn = lmb_rgn_lst->data;
-	}
+	if (!alist_add_placeholder(lmb_rgn_lst))
+		return -1;
+	rgn = lmb_rgn_lst->data;
 
 	/* Couldn't coalesce the LMB, so add it to the sorted table. */
 	for (i = lmb_rgn_lst->count - 1; i >= 0; i--) {
-		if (base < rgn[i].base) {
-			rgn[i + 1].base = rgn[i].base;
-			rgn[i + 1].size = rgn[i].size;
-			rgn[i + 1].flags = rgn[i].flags;
+		if (i && base < rgn[i - 1].base) {
+			rgn[i] = rgn[i - 1];
 		} else {
-			rgn[i + 1].base = base;
-			rgn[i + 1].size = size;
-			rgn[i + 1].flags = flags;
+			rgn[i].base = base;
+			rgn[i].size = size;
+			rgn[i].flags = flags;
 			break;
 		}
 	}
 
-	if (base < rgn[0].base) {
-		rgn[0].base = base;
-		rgn[0].size = size;
-		rgn[0].flags = flags;
-	}
-
-	lmb_rgn_lst->count++;
-
 	return 0;
 }

On Tue, 30 Jul 2024 at 20:10, Simon Glass <sjg@chromium.org> wrote:
>
> This function has more special cases than it needs. Simplify it to reduce code size and complexity.
>
> Signed-off-by: Simon Glass <sjg@chromium.org>
>
>  lib/lmb.c | 57 +++++++++++++++++++------------------------------
>  1 file changed, 19 insertions(+), 38 deletions(-)

[snip]

> -	if (alist_full(lmb_rgn_lst)) {
> -		if (!alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc * 2))
> -			return -1;
> -		else
> -			rgn = lmb_rgn_lst->data;
> -	}
> +	if (!alist_add_placeholder(lmb_rgn_lst))
> +		return -1;
> +	rgn = lmb_rgn_lst->data;

I think the above should have a check for alist_full(), and then add to the list if full. Else we simply go on adding to the list on every new node that gets added.

> 	/* Couldn't coalesce the LMB, so add it to the sorted table. */
> 	for (i = lmb_rgn_lst->count - 1; i >= 0; i--) {
> -		if (base < rgn[i].base) {
> -			rgn[i + 1].base = rgn[i].base;
> -			rgn[i + 1].size = rgn[i].size;
> -			rgn[i + 1].flags = rgn[i].flags;
> +		if (i && base < rgn[i - 1].base) {
> +			rgn[i] = rgn[i - 1];
> 		} else {
> -			rgn[i + 1].base = base;
> -			rgn[i + 1].size = size;
> -			rgn[i + 1].flags = flags;
> +			rgn[i].base = base;
> +			rgn[i].size = size;
> +			rgn[i].flags = flags;
> 			break;
> 		}
> 	}

With the logic that you put above, should the loop not have 'i' initialised to lmb_rgn_lst->count? I mean

    for (i = lmb_rgn_lst->count; i >= 0; i--)

Else we are overwriting one region?

-sughosh

On Thu, 1 Aug 2024 at 20:28, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
> On Tue, 30 Jul 2024 at 20:10, Simon Glass <sjg@chromium.org> wrote:

[snip]

> With the logic that you put above, should the loop not have 'i' initialised to lmb_rgn_lst->count? I mean
>
>     for (i = lmb_rgn_lst->count; i >= 0; i--)
>
> Else we are overwriting one region?

To answer my question, yes we do need to change the loop start to accommodate the new node. This code is better, thanks for the suggestion!
-sughosh

Hi Sughosh,
On Thu, 1 Aug 2024 at 08:58, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
> On Tue, 30 Jul 2024 at 20:10, Simon Glass <sjg@chromium.org> wrote:

[snip]

> > +	if (!alist_add_placeholder(lmb_rgn_lst))
> > +		return -1;
> > +	rgn = lmb_rgn_lst->data;

> I think the above should have a check for alist_full(), and then add to the list if full. Else we simply go on adding to the list on every new node that gets added.

At this point in the function, we know that we are adding a new entry. We could have an alist_insert() perhaps?
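
A rough sketch of what that might look like (untested, and assuming struct alist stores its element size in an obj_size field):

	/* Sketch only: open up a hole at @index, growing the list if needed */
	static void *alist_insert(struct alist *lst, uint index)
	{
		u8 *ptr;

		if (!alist_add_placeholder(lst))
			return NULL;
		ptr = lst->data;
		memmove(ptr + (index + 1) * lst->obj_size,
			ptr + index * lst->obj_size,
			(lst->count - 1 - index) * lst->obj_size);

		return ptr + index * lst->obj_size;
	}

The insertion loop below would then reduce to finding the index and filling in the returned slot.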

> > 	/* Couldn't coalesce the LMB, so add it to the sorted table. */
> > 	for (i = lmb_rgn_lst->count - 1; i >= 0; i--) {
> > -		if (base < rgn[i].base) {
> > -			rgn[i + 1].base = rgn[i].base;
> > -			rgn[i + 1].size = rgn[i].size;
> > -			rgn[i + 1].flags = rgn[i].flags;
> > +		if (i && base < rgn[i - 1].base) {
> > +			rgn[i] = rgn[i - 1];
> > 		} else {
> > -			rgn[i + 1].base = base;
> > -			rgn[i + 1].size = size;
> > -			rgn[i + 1].flags = flags;
> > +			rgn[i].base = base;
> > +			rgn[i].size = size;
> > +			rgn[i].flags = flags;
> > 			break;
> > 		}
> > 	}

> With the logic that you put above, should the loop not have 'i' initialised to lmb_rgn_lst->count? I mean
>
>     for (i = lmb_rgn_lst->count; i >= 0; i--)
>
> Else we are overwriting one region?

It starts by doing the assignment rgn[count - 1 + 1] = rgn[count - 1], which I believe is correct?

Regards,
Simon

On Thu, 1 Aug 2024 at 21:42, Simon Glass <sjg@chromium.org> wrote:
> Hi Sughosh,
>
> On Thu, 1 Aug 2024 at 08:58, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
> > On Tue, 30 Jul 2024 at 20:10, Simon Glass <sjg@chromium.org> wrote:

[snip]

> > With the logic that you put above, should the loop not have 'i' initialised to lmb_rgn_lst->count? I mean
> >
> >     for (i = lmb_rgn_lst->count; i >= 0; i--)
> >
> > Else we are overwriting one region?
>
> It starts by doing the assignment rgn[count - 1 + 1] = rgn[count - 1], which I believe is correct?

That was the earlier logic, but now this is being replaced by

+		if (i && base < rgn[i - 1].base) {
+			rgn[i] = rgn[i - 1];

Which means 'i' should be initialised to lmb_rgn_lst->count? If you see the patch, the for loop initialisation is not being changed.

-sughosh

Hi Sughosh,
On Thu, 1 Aug 2024 at 10:19, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
> On Thu, 1 Aug 2024 at 21:42, Simon Glass <sjg@chromium.org> wrote:
> > Hi Sughosh,
> >
> > On Thu, 1 Aug 2024 at 08:58, Sughosh Ganu <sughosh.ganu@linaro.org> wrote:
> > > On Tue, 30 Jul 2024 at 20:10, Simon Glass <sjg@chromium.org> wrote:

[snip]

> > It starts by doing the assignment rgn[count - 1 + 1] = rgn[count - 1], which I believe is correct?
>
> That was the earlier logic, but now this is being replaced by
>
> +		if (i && base < rgn[i - 1].base) {
> +			rgn[i] = rgn[i - 1];
>
> Which means 'i' should be initialised to lmb_rgn_lst->count? If you see the patch, the for loop initialisation is not being changed.

OK, well I'll let you sort that out :-)

Regards,
Simon

[RFC PATCH 3/4] lmb: Split init into two functions

The normal case for initialising lmb is to set up all the memory regions. Put that in a separate function, called lmb_init(). The other function is now only used for testing.
Note that tests never set up the memory regions, so we can drop that code.
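
With this, board_r.c lists lmb_init() directly in init_sequence_r[] and the initr_lmb() wrapper goes away; tests keep calling lmb_mem_regions_init() with the two list pointers.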

Signed-off-by: Simon Glass <sjg@chromium.org>
---
 common/board_r.c |  4 +--
 include/lmb.h    | 13 +++++++
 lib/lmb.c        | 88 +++++++++++++++++++++++++-----------------------
 3 files changed, 59 insertions(+), 46 deletions(-)

diff --git a/common/board_r.c b/common/board_r.c
index eaf9b40ec02..9ce1b8cdd2b 100644
--- a/common/board_r.c
+++ b/common/board_r.c
@@ -611,9 +611,7 @@ static init_fnc_t init_sequence_r[] = {
 #ifdef CONFIG_CLOCKS
 	set_cpu_clk_info, /* Setup clock information */
 #endif
-#if CONFIG_IS_ENABLED(LMB)
-	initr_lmb,
-#endif
+	lmb_init,
 #ifdef CONFIG_EFI_LOADER
 	efi_memory_init,
 #endif
diff --git a/include/lmb.h b/include/lmb.h
index 19231439c34..a2236a419fd 100644
--- a/include/lmb.h
+++ b/include/lmb.h
@@ -96,6 +96,19 @@ void board_lmb_reserve(void);
 void arch_lmb_reserve(void);
 void arch_lmb_reserve_generic(ulong sp, ulong end, ulong align);
 
+#if CONFIG_IS_ENABLED(LMB)
+/**
+ * lmb_init() - Initialise the LMB memory
+ *
+ * Initialise the LMB-subsystem-related data structures.
+ *
+ * Return: 0 if OK, -ve on failure.
+ */
+int lmb_init(void);
+#else
+static int lmb_init(void) { return 0; }
+#endif
+
 /**
  * lmb_mem_regions_init() - Initialise the LMB memory
  * @mem_lst: Pointer to store location of free memory list
diff --git a/lib/lmb.c b/lib/lmb.c
index 83b060a2f4d..34cbaeaafd5 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -718,6 +718,47 @@ __weak void arch_lmb_reserve(void)
 	arch_lmb_reserve_generic(rsv_start, gd->ram_top, 16384);
 }
 
+static int lmb_setup(struct lmb *lmb)
+{
+	int ret;
+
+	ret = alist_init(&lmb->free_mem, sizeof(struct lmb_region),
+			 (uint)LMB_ALIST_INITIAL_SIZE);
+	if (!ret) {
+		log_debug("Unable to initialise the list for LMB free memory\n");
+		return -ENOMEM;
+	}
+
+	ret = alist_init(&lmb->used_mem, sizeof(struct lmb_region),
+			 (uint)LMB_ALIST_INITIAL_SIZE);
+	if (!ret) {
+		log_debug("Unable to initialise the list for LMB used memory\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int lmb_init(void)
+{
+	bool ret;
+
+	ret = lmb_setup(&lmb);
+	if (ret) {
+		log_debug("Unable to init LMB\n");
+		return ret;
+	}
+	lmb_add_memory();
+
+	/* Reserve the U-Boot image region once U-Boot has relocated */
+	if (spl_phase() == PHASE_SPL)
+		lmb_reserve_common_spl();
+	else if (spl_phase() == PHASE_BOARD_R)
+		lmb_reserve_common((void *)gd->fdt_blob);
+
+	return 0;
+}
+
 /**
  * lmb_mem_regions_init() - Initialise the LMB memory
  * @mem_lst: Pointer to store location of free memory list
@@ -740,18 +781,10 @@ int lmb_mem_regions_init(struct alist **mem_lst, struct alist **used_lst,
 {
 	bool ret;
 
-	ret = alist_init(&lmb.free_mem, sizeof(struct lmb_region),
-			 (uint)LMB_ALIST_INITIAL_SIZE);
-	if (!ret) {
-		log_debug("Unable to initialise the list for LMB free memory\n");
-		return -1;
-	}
-
-	ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
-			 (uint)LMB_ALIST_INITIAL_SIZE);
-	if (!ret) {
-		log_debug("Unable to initialise the list for LMB used memory\n");
-		return -1;
+	ret = lmb_setup(&lmb);
+	if (ret) {
+		log_debug("Unable to init LMB\n");
+		return ret;
 	}
 
 	if (mem_lst)
@@ -760,17 +793,6 @@ int lmb_mem_regions_init(struct alist **mem_lst, struct alist **used_lst,
 	if (used_lst)
 		*used_lst = &lmb.used_mem;
 
-	if (!add_rsv_mem)
-		return 0;
-
-	lmb_add_memory();
-
-	/* Reserve the U-Boot image region once U-Boot has relocated */
-	if (spl_phase() == PHASE_SPL)
-		lmb_reserve_common_spl();
-	else if (spl_phase() == PHASE_BOARD_R)
-		lmb_reserve_common((void *)gd->fdt_blob);
-
 	return 0;
 }
 
@@ -790,26 +812,6 @@ void __maybe_unused lmb_mem_regions_uninit(struct alist *mem_lst,
 	alist_uninit(used_lst);
 }
 
-/**
- * initr_lmb() - Initialise the LMB lists
- *
- * Initialise the LMB lists needed for keeping the memory map. There
- * are two lists, in form of alloced list data structure. One for the
- * available memory, and one for the used memory.
- *
- * Return: 0 on success, -ve on error
- */
-int initr_lmb(void)
-{
-	int ret;
-
-	ret = lmb_mem_regions_init(NULL, NULL, true);
-	if (ret)
-		printf("Unable to initialise the LMB data structures\n");
-
-	return ret;
-}
-
 struct lmb *lmb_get(void)
 {
 	return &lmb;

[RFC PATCH 4/4] lmb: Ensure that tests don't change lmb state

Tests should not change the lmb state, so provide a way to save and restore the state.
Note: The _norun tests can now become normal tests.
When tests fail, lmb is left in a broken state. This is probably fine. If it causes problems we could save/restore outside the test, using a UT_TESTF_ flag.
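
A test then brackets its body like this (the same pattern as the setup_lmb_test() helper in the diff below):

	struct lmb store;

	/* save the global state; lmb_push() re-inits the lists to empty */
	ut_assertok(lmb_push(&store));

	/* ... calls to lmb_add(), lmb_reserve(), lmb_alloc(), etc. ... */

	/* free the test's lists and restore the saved state */
	lmb_pop(&store);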

Signed-off-by: Simon Glass <sjg@chromium.org>
---
 include/lmb.h | 32 ++++++++---------------
 lib/lmb.c     | 70 ++++++++++++++++++---------------------------
 test/lmb_ut.c | 54 +++++++++++++++++++++++++++------------
 3 files changed, 73 insertions(+), 83 deletions(-)

diff --git a/include/lmb.h b/include/lmb.h
index a2236a419fd..ed943b29234 100644
--- a/include/lmb.h
+++ b/include/lmb.h
@@ -110,35 +110,23 @@ static int lmb_init(void) { return 0; }
 #endif
 
 /**
- * lmb_mem_regions_init() - Initialise the LMB memory
- * @mem_lst: Pointer to store location of free memory list
- * @used_lst: Pointer to store location of used memory list
- * @add_rsv_mem: flag to indicate if memory is to be added and reserved
+ * lmb_push() - Store existing lmb state and set up a new state
  *
- * Initialise the LMB subsystem related data structures. There are two
- * alloced lists that are initialised, one for the free memory, and one
- * for the used memory.
+ * This is only used for testing
  *
- * Initialise the two lists as part of board init during boot. When called
- * from a test, passes the pointers to the two lists to the caller. The
- * caller is then required to call the corresponding function to uninit
- * the lists.
- *
- * Return: 0 if OK, -ve on failure.
+ * @store: Place to store the current lmb state
+ * Return: 0 if OK, -ENOMEM if out of memory
  */
-int lmb_mem_regions_init(struct alist **mem_lst, struct alist **used_lst,
-			 bool add_rsv_mem);
+int lmb_push(struct lmb *store);
 
 /**
- * lmb_mem_regions_uninit() - Unitialise the lmb lists
- * @mem_lst: Pointer to store location of free memory list
- * @used_lst: Pointer to store location of used memory list
+ * lmb_pop() - Restore an old lmb state
+ *
+ * This is only used for testing
  *
- * Unitialise the LMB lists for free and used memory that was
- * initialised as part of the init function. Called when running
- * lmb test routines.
+ * @store: lmb state to restore
  */
-void lmb_mem_regions_uninit(struct alist *mem_lst, struct alist *used_lst);
+void lmb_pop(struct lmb *store);
 
 struct lmb *lmb_get(void);
diff --git a/lib/lmb.c b/lib/lmb.c
index 34cbaeaafd5..ce1526f11bc 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -739,31 +739,8 @@ static int lmb_setup(struct lmb *lmb)
 	return 0;
 }
 
-int lmb_init(void)
-{
-	bool ret;
-
-	ret = lmb_setup(&lmb);
-	if (ret) {
-		log_debug("Unable to init LMB\n");
-		return ret;
-	}
-	lmb_add_memory();
-
-	/* Reserve the U-Boot image region once U-Boot has relocated */
-	if (spl_phase() == PHASE_SPL)
-		lmb_reserve_common_spl();
-	else if (spl_phase() == PHASE_BOARD_R)
-		lmb_reserve_common((void *)gd->fdt_blob);
-
-	return 0;
-}
-
 /**
  * lmb_mem_regions_init() - Initialise the LMB memory
- * @mem_lst: Pointer to store location of free memory list
- * @used_lst: Pointer to store location of used memory list
- * @add_rsv_mem: flag to indicate if memory is to be added and reserved
  *
  * Initialise the LMB subsystem related data structures. There are two
  * alloced lists that are initialised, one for the free memory, and one
@@ -776,8 +753,7 @@ int lmb_init(void)
  *
  * Return: 0 if OK, -ve on failure.
  */
-int lmb_mem_regions_init(struct alist **mem_lst, struct alist **used_lst,
-			 bool add_rsv_mem)
+int lmb_init(void)
 {
 	bool ret;
 
@@ -786,33 +762,37 @@ int lmb_mem_regions_init(struct alist **mem_lst, struct alist **used_lst,
 		log_debug("Unable to init LMB\n");
 		return ret;
 	}
+	lmb_add_memory();
 
-	if (mem_lst)
-		*mem_lst = &lmb.free_mem;
-
-	if (used_lst)
-		*used_lst = &lmb.used_mem;
+	/* Reserve the U-Boot image region once U-Boot has relocated */
+	if (spl_phase() == PHASE_SPL)
+		lmb_reserve_common_spl();
+	else if (spl_phase() == PHASE_BOARD_R)
+		lmb_reserve_common((void *)gd->fdt_blob);
 
 	return 0;
 }
 
-/**
- * lmb_mem_regions_uninit() - Unitialise the lmb lists
- * @mem_lst: Pointer to store location of free memory list
- * @used_lst: Pointer to store location of used memory list
- *
- * Unitialise the LMB lists for free and used memory that was
- * initialised as part of the init function. Called when running
- * lmb test routines.
- */
-void __maybe_unused lmb_mem_regions_uninit(struct alist *mem_lst,
-					   struct alist *used_lst)
+struct lmb *lmb_get(void)
+{
+	return &lmb;
+}
+
+int lmb_push(struct lmb *store)
 {
-	alist_uninit(mem_lst);
-	alist_uninit(used_lst);
+	int ret;
+
+	*store = lmb;
+	ret = lmb_setup(&lmb);
+	if (ret)
+		return ret;
+
+	return 0;
 }
 
-struct lmb *lmb_get(void)
+void lmb_pop(struct lmb *store)
 {
-	return &lmb;
+	alist_uninit(&lmb.free_mem);
+	alist_uninit(&lmb.used_mem);
+	lmb = *store;
 }
diff --git a/test/lmb_ut.c b/test/lmb_ut.c
index 49f75e55d2b..dfdc7687321 100644
--- a/test/lmb_ut.c
+++ b/test/lmb_ut.c
@@ -61,6 +61,19 @@ static int check_lmb(struct unit_test_state *uts, struct alist *mem_lst,
 			   num_reserved, base1, size1, base2, size2, base3, \
 			   size3))
 
+static int setup_lmb_test(struct unit_test_state *uts, struct lmb *store,
+			  struct alist **mem_lstp, struct alist **used_lstp)
+{
+	struct lmb *lmb;
+
+	ut_assertok(lmb_push(store));
+	lmb = lmb_get();
+	*mem_lstp = &lmb->free_mem;
+	*used_lstp = &lmb->used_mem;
+
+	return 0;
+}
+
 static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
 			    const phys_size_t ram_size, const phys_addr_t ram0,
 			    const phys_size_t ram0_size,
@@ -73,6 +86,7 @@ static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
 	struct alist *mem_lst, *used_lst;
 	struct lmb_region *mem, *used;
 	phys_addr_t a, a2, b, b2, c, d;
+	struct lmb store;
 
 	/* check for overflow */
 	ut_assert(ram_end == 0 || ram_end > ram);
@@ -81,7 +95,7 @@ static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
 	ut_assert(alloc_64k_addr >= ram + 8);
 	ut_assert(alloc_64k_end <= ram_end - 8);
 
-	ut_asserteq(lmb_mem_regions_init(&mem_lst, &used_lst, false), 0);
+	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
 	mem = mem_lst->data;
 	used = used_lst->data;
 
@@ -183,7 +197,7 @@ static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
 		ut_asserteq(mem[0].size, ram_size);
 	}
 
-	lmb_mem_regions_uninit(mem_lst, used_lst);
+	lmb_pop(&store);
 
 	return 0;
 }
@@ -243,11 +257,12 @@ static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
 	struct lmb_region *mem, *used;
 	long ret;
 	phys_addr_t a, b;
+	struct lmb store;
 
 	/* check for overflow */
 	ut_assert(ram_end == 0 || ram_end > ram);
 
-	ut_asserteq(lmb_mem_regions_init(&mem_lst, &used_lst, false), 0);
+	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
 	mem = mem_lst->data;
 	used = used_lst->data;
 
@@ -284,7 +299,7 @@ static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
 	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, alloc_64k_addr,
 		   0x10000, 0, 0, 0, 0);
 
-	lmb_mem_regions_uninit(mem_lst, used_lst);
+	lmb_pop(&store);
 
 	return 0;
 }
@@ -315,11 +330,12 @@ static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
 	struct lmb_region *mem, *used;
 	const phys_addr_t alloc_size_aligned = (alloc_size + align - 1) &
 		~(align - 1);
+	struct lmb store;
 
 	/* check for overflow */
 	ut_assert(ram_end == 0 || ram_end > ram);
 
-	ut_asserteq(lmb_mem_regions_init(&mem_lst, &used_lst, false), 0);
+	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
 	mem = mem_lst->data;
 	used = used_lst->data;
 
@@ -366,7 +382,7 @@ static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
 	ut_asserteq(ret, 0);
 	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
 
-	lmb_mem_regions_uninit(mem_lst, used_lst);
+	lmb_pop(&store);
 
 	return 0;
 }
@@ -411,8 +427,9 @@ static int lmb_test_lmb_at_0_norun(struct unit_test_state *uts)
 	struct lmb_region *mem, *used;
 	long ret;
 	phys_addr_t a, b;
+	struct lmb store;
 
-	ut_asserteq(lmb_mem_regions_init(&mem_lst, &used_lst, false), 0);
+	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
 	mem = mem_lst->data;
 	used = used_lst->data;
 
@@ -441,7 +458,7 @@ static int lmb_test_lmb_at_0_norun(struct unit_test_state *uts)
 	ut_asserteq(ret, 0);
 	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
 
-	lmb_mem_regions_uninit(mem_lst, used_lst);
+	lmb_pop(&store);
 
 	return 0;
 }
@@ -455,8 +472,9 @@ static int lmb_test_lmb_overlapping_reserve_norun(struct unit_test_state *uts)
 	struct alist *mem_lst, *used_lst;
 	struct lmb_region *mem, *used;
 	long ret;
+	struct lmb store;
 
-	ut_asserteq(lmb_mem_regions_init(&mem_lst, &used_lst, false), 0);
+	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
 	mem = mem_lst->data;
 	used = used_lst->data;
 
@@ -496,7 +514,7 @@ static int lmb_test_lmb_overlapping_reserve_norun(struct unit_test_state *uts)
 	ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40000000, 0x40000,
 		   0, 0, 0, 0);
 
-	lmb_mem_regions_uninit(mem_lst, used_lst);
+	lmb_pop(&store);
 
 	return 0;
 }
@@ -517,11 +535,12 @@ static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
 	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
 	long ret;
 	phys_addr_t a, b, c, d, e;
+	struct lmb store;
 
 	/* check for overflow */
 	ut_assert(ram_end == 0 || ram_end > ram);
 
-	ut_asserteq(lmb_mem_regions_init(&mem_lst, &used_lst, false), 0);
+	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
 	mem = mem_lst->data;
 	used = used_lst->data;
 
@@ -618,7 +637,7 @@ static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
 		ut_asserteq(ret, 0);
 	}
 
-	lmb_mem_regions_uninit(mem_lst, used_lst);
+	lmb_pop(&store);
 
 	return 0;
 }
@@ -648,13 +667,15 @@ static int test_get_unreserved_size(struct unit_test_state *uts,
 	const phys_size_t alloc_addr_a = ram + 0x8000000;
 	const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
 	const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
+	struct lmb store;
 	long ret;
 	phys_size_t s;
 
	/* check for overflow */
 	ut_assert(ram_end == 0 || ram_end > ram);
 
-	ut_asserteq(lmb_mem_regions_init(&mem_lst, &used_lst, false), 0);
+	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
+
 	mem = mem_lst->data;
 	used = used_lst->data;
 
@@ -693,7 +714,7 @@ static int test_get_unreserved_size(struct unit_test_state *uts,
 	s = lmb_get_free_size(ram_end - 4);
 	ut_asserteq(s, 4);
 
-	lmb_mem_regions_uninit(mem_lst, used_lst);
+	lmb_pop(&store);
 
 	return 0;
 }
@@ -718,9 +739,10 @@ static int lmb_test_lmb_flags_norun(struct unit_test_state *uts)
 	struct alist *mem_lst, *used_lst;
 	const phys_addr_t ram = 0x40000000;
 	const phys_size_t ram_size = 0x20000000;
+	struct lmb store;
 	long ret;
 
-	ut_asserteq(lmb_mem_regions_init(&mem_lst, &used_lst, false), 0);
+	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
 	mem = mem_lst->data;
 	used = used_lst->data;
 
@@ -798,7 +820,7 @@ static int lmb_test_lmb_flags_norun(struct unit_test_state *uts)
 	ut_asserteq(lmb_is_nomap(&used[1]), 0);
 	ut_asserteq(lmb_is_nomap(&used[2]), 1);
 
-	lmb_mem_regions_uninit(mem_lst, used_lst);
+	lmb_pop(&store);
 
 	return 0;
 }