commit 4eba0ef: [Fix] Fix the poor man's allocator algorithm
Vsevolod Stakhov
vsevolod at highsecure.ru
Thu Jul 9 12:21:07 UTC 2020
Author: Vsevolod Stakhov
Date: 2020-07-09 12:40:43 +0100
URL: https://github.com/rspamd/rspamd/commit/4eba0ef16211ae27f13e826fb1957858439666ec
[Fix] Fix the poor man's allocator algorithm
---
contrib/lua-lpeg/lpvm.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/contrib/lua-lpeg/lpvm.c b/contrib/lua-lpeg/lpvm.c
index 6058bf5b1..e107292e2 100644
--- a/contrib/lua-lpeg/lpvm.c
+++ b/contrib/lua-lpeg/lpvm.c
@@ -32,7 +32,7 @@ struct poor_slab {
};
/* Used to optimize pages allocation */
-struct poor_slab slabs;
+RSPAMD_ALIGNED (64) struct poor_slab slabs;
static uint64_t xorshifto_seed[2] = {0xdeadbabe, 0xdeadbeef};
@@ -64,7 +64,7 @@ lpeg_allocate_mem_low (size_t sz)
uint64_t s1 = xorshifto_seed[1];
s1 ^= s0;
- xorshifto_seed[0] = xoroshiro_rotl(s0, 55) ^ s1 ^ (s1 << 14);
+ xorshifto_seed[0] = xoroshiro_rotl (s0, 55) ^ s1 ^ (s1 << 14);
xorshifto_seed[1] = xoroshiro_rotl (s1, 36);
flags |= MAP_FIXED;
/* Get 46 bits */
@@ -77,7 +77,7 @@ lpeg_allocate_mem_low (size_t sz)
memcpy (cp, &sz, sizeof (sz));
for (unsigned i = 0; i < MAX_PIECES; i ++) {
- if (slabs.pieces[i].sz == 0) {
+ if (slabs.pieces[i].occupied == 0) {
/* Store piece */
slabs.pieces[i].sz = sz;
slabs.pieces[i].ptr = cp;
@@ -90,7 +90,7 @@ lpeg_allocate_mem_low (size_t sz)
/* Not enough free pieces, pop some */
unsigned sel = ((uintptr_t)cp) & ((MAX_PIECES * 2) - 1);
/* Here we free memory in fact */
- munmap (slabs.pieces[sel].ptr, slabs.pieces[sel].sz);
+ munmap (slabs.pieces[sel].ptr, slabs.pieces[sel].sz + sizeof (sz));
slabs.pieces[sel].sz = sz;
slabs.pieces[sel].ptr = cp;
slabs.pieces[sel].occupied = 1;
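
The first two hunks above deal with how the allocator picks addresses: the slab descriptor is aligned to a 64-byte cache line, and a xoroshiro128+ step (the 55/14/36 rotation constants shown in the hunk) produces the random bits from which a 46-bit, page-aligned address is derived and handed to mmap() with MAP_FIXED. The following is a minimal sketch of that flow, not the committed code; the helper names, the 4 KiB page size and the exact flag set are assumptions.

#define _DEFAULT_SOURCE
#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

/* PRNG state, seeded as in the patch */
static uint64_t seed[2] = {0xdeadbabe, 0xdeadbeef};

static inline uint64_t
rotl64 (uint64_t x, int k)
{
    return (x << k) | (x >> (64 - k));
}

/* Map sz bytes at a pseudo-random address below 2^46 (sketch) */
static void *
mmap_low (size_t sz)
{
    uint64_t s0 = seed[0], s1 = seed[1];
    uint64_t rnd = s0 + s1;                  /* xoroshiro128+ output */

    s1 ^= s0;
    seed[0] = rotl64 (s0, 55) ^ s1 ^ (s1 << 14);
    seed[1] = rotl64 (s1, 36);

    /* Keep 46 bits and round down to an assumed 4 KiB page boundary */
    uintptr_t addr = (uintptr_t)(rnd & ((1ULL << 46) - 1)) & ~(uintptr_t)4095;

    /*
     * MAP_FIXED replaces anything already mapped at addr; the 46-bit
     * randomization makes a collision unlikely, but a caller must still
     * check for MAP_FAILED.
     */
    return mmap ((void *)addr, sz, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
}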
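
The remaining two hunks fix the slab bookkeeping itself. Each mapping starts with a size_t header holding the requested size (the memcpy into cp), a free slot is now recognized by its occupied flag rather than by sz == 0, and when a slot has to be evicted munmap() must be given sz + sizeof (sz) bytes so the header is released together with the payload. Below is a minimal sketch of that bookkeeping under assumed names (MAX_PIECES, struct piece) and a simplified slot-selection mask; it is an illustration, not the committed code.

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

#define MAX_PIECES 8                   /* assumed table size, power of two */

struct piece {
    void *ptr;                         /* start of the mapping: header + payload */
    size_t sz;                         /* payload size requested by the caller */
    unsigned occupied;                 /* 1 while the slot is in use */
};

static struct piece pieces[MAX_PIECES];

/* cp points at a fresh mapping of sz + sizeof (sz) bytes */
static void *
remember_piece (unsigned char *cp, size_t sz)
{
    /* The size header lives at the very start of the mapping */
    memcpy (cp, &sz, sizeof (sz));

    for (unsigned i = 0; i < MAX_PIECES; i++) {
        if (pieces[i].occupied == 0) { /* the fix: was sz == 0 */
            pieces[i].ptr = cp;
            pieces[i].sz = sz;
            pieces[i].occupied = 1;

            return cp + sizeof (sz);   /* caller sees the payload only */
        }
    }

    /* No free slot: evict one chosen from the low pointer bits */
    unsigned sel = (unsigned)((uintptr_t)cp & (MAX_PIECES - 1));

    /* The fix: unmap header + payload, not just the payload */
    munmap (pieces[sel].ptr, pieces[sel].sz + sizeof (sz));

    pieces[sel].ptr = cp;
    pieces[sel].sz = sz;
    pieces[sel].occupied = 1;

    return cp + sizeof (sz);
}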