author     Simon Goldschmidt <simon.k.r.goldschmidt@gmail.com>   2019-01-14 22:38:15 +0100
committer  Tom Rini <trini@konsulko.com>                         2019-01-16 16:36:48 -0500
commit     d67f33cf4ee72fd9bc64d68cb51a77798b65cf3a (patch)
tree       26bf0439372588bf6a600387650033862ae90f34 /lib/lmb.c
parent     a01ae0c23f3a72b05dab623806adee4634def172 (diff)
download   u-boot-d67f33cf4ee72fd9bc64d68cb51a77798b65cf3a.tar.gz
lmb: fix allocation at end of address range
The lmb code fails if base + size of RAM overflows to zero. Fix this by
calculating end as 'base + size - 1' instead of 'base + size' where
appropriate.

Added tests to assert this is fixed.

Reviewed-by: Simon Glass <sjg@chromium.org>
Signed-off-by: Simon Goldschmidt <simon.k.r.goldschmidt@gmail.com>
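For illustration only (not part of the patch): with a 32-bit phys_addr_t, a RAM
bank that ends exactly at the 4 GiB boundary makes 'base + size' wrap around to
zero, while the inclusive end 'base + size - 1' stays representable. A minimal
standalone sketch, assuming 32-bit physical addresses and hypothetical values:

/* Standalone demo of the wraparound the patch avoids; not U-Boot code. */
#include <inttypes.h>
#include <stdio.h>

typedef uint32_t phys_addr_t;	/* assumption: 32-bit physical addresses */
typedef uint32_t phys_size_t;

int main(void)
{
	phys_addr_t base = 0xE0000000;	/* hypothetical RAM base */
	phys_size_t size = 0x20000000;	/* bank runs up to the 4 GiB boundary */

	phys_addr_t end_excl = base + size;	/* wraps to 0x00000000 */
	phys_addr_t end_incl = base + size - 1;	/* 0xFFFFFFFF, as intended */

	printf("exclusive end: 0x%08" PRIx32 "\n", end_excl);
	printf("inclusive end: 0x%08" PRIx32 "\n", end_incl);

	/*
	 * With end_excl == 0, a check such as 'base1 < base2 + size2' in the
	 * old lmb_addrs_overlap() can never be true for this bank, which is
	 * why the patch switches to inclusive end addresses.
	 */
	return 0;
}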
Diffstat (limited to 'lib/lmb.c')
-rw-r--r--  lib/lmb.c | 29
1 file changed, 12 insertions, 17 deletions
diff --git a/lib/lmb.c b/lib/lmb.c
index 1705417348..6d3dcf4e09 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -43,7 +43,10 @@ void lmb_dump_all(struct lmb *lmb)
static long lmb_addrs_overlap(phys_addr_t base1,
phys_size_t size1, phys_addr_t base2, phys_size_t size2)
{
- return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
+ const phys_addr_t base1_end = base1 + size1 - 1;
+ const phys_addr_t base2_end = base2 + size2 - 1;
+
+ return ((base1 <= base2_end) && (base2 <= base1_end));
}
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
@@ -89,18 +92,9 @@ static void lmb_coalesce_regions(struct lmb_region *rgn,
void lmb_init(struct lmb *lmb)
{
- /* Create a dummy zero size LMB which will get coalesced away later.
- * This simplifies the lmb_add() code below...
- */
- lmb->memory.region[0].base = 0;
- lmb->memory.region[0].size = 0;
- lmb->memory.cnt = 1;
+ lmb->memory.cnt = 0;
lmb->memory.size = 0;
-
- /* Ditto. */
- lmb->reserved.region[0].base = 0;
- lmb->reserved.region[0].size = 0;
- lmb->reserved.cnt = 1;
+ lmb->reserved.cnt = 0;
lmb->reserved.size = 0;
}
@@ -110,9 +104,10 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
unsigned long coalesced = 0;
long adjacent, i;
- if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
+ if (rgn->cnt == 0) {
rgn->region[0].base = base;
rgn->region[0].size = size;
+ rgn->cnt = 1;
return 0;
}
@@ -183,7 +178,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
struct lmb_region *rgn = &(lmb->reserved);
phys_addr_t rgnbegin, rgnend;
- phys_addr_t end = base + size;
+ phys_addr_t end = base + size - 1;
int i;
rgnbegin = rgnend = 0; /* supress gcc warnings */
@@ -191,7 +186,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
/* Find the region where (base, size) belongs to */
for (i=0; i < rgn->cnt; i++) {
rgnbegin = rgn->region[i].base;
- rgnend = rgnbegin + rgn->region[i].size;
+ rgnend = rgnbegin + rgn->region[i].size - 1;
if ((rgnbegin <= base) && (end <= rgnend))
break;
@@ -209,7 +204,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
/* Check to see if region is matching at the front */
if (rgnbegin == base) {
- rgn->region[i].base = end;
+ rgn->region[i].base = end + 1;
rgn->region[i].size -= size;
return 0;
}
@@ -225,7 +220,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
* beginging of the hole and add the region after hole.
*/
rgn->region[i].size = base - rgn->region[i].base;
- return lmb_add_region(rgn, end, rgnend - end);
+ return lmb_add_region(rgn, end + 1, rgnend - end);
}
long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
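The commit message mentions new tests; those live outside lib/lmb.c and are not
part of this diff. As a hedged sketch only, assuming the struct lmb based API of
this era (lmb_init(), lmb_add(), lmb_alloc(), lmb_free()) and a configuration
where phys_addr_t is 32 bits, the scenario being guarded against looks roughly
like this (hypothetical values, not the actual test code added upstream):

/* Sketch meant to be built inside U-Boot; not the real test. */
#include <common.h>
#include <lmb.h>

static void check_alloc_at_end_of_address_range(void)
{
	struct lmb lmb;
	const phys_addr_t ram_base = 0xE0000000;	/* hypothetical bank */
	const phys_size_t ram_size = 0x20000000;	/* ends exactly at 4 GiB */
	phys_addr_t addr;

	lmb_init(&lmb);
	lmb_add(&lmb, ram_base, ram_size);

	/*
	 * Before this patch, ram_base + ram_size wrapped to 0, so the bank
	 * was effectively invisible to the allocator and allocation failed.
	 */
	addr = lmb_alloc(&lmb, 4, 1);
	assert(addr != 0);
	assert(addr >= ram_base && addr <= ram_base + ram_size - 1);

	/* Freeing the block again should succeed with the inclusive ends. */
	assert(lmb_free(&lmb, addr, 4) == 0);
}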