
Merge patch series "mcheck implementation for U-Boot"

Eugene Uriev <eugeneuriev@gmail.com> says:

There was no "mcheck" for U-Boot before.

Since U-Boot has only one thread and normally makes roughly 4000-6000
mallocs, it is affordable to use heavier canaries to protect heap
chunks. My variant uses a 2x8 = 16-byte protector, and the multiplier
can be changed to tune the speed/protection trade-off. This protects
not only against memset()-s, but also against "near" wild pointers,
and makes it more probable to catch "distant" ones.

The core file of the series is included into the C file rather than
compiled separately, in order to allow (potential) coexistence of
mcheck protectors, e.g. for malloc_simple() and dlmalloc simultaneously.

My tests were on a 64-bit ARM SoC, so the patch is aware of
alignment.

Primarily this patch is meant for developers: to verify that a change
does not break heap integrity. By default mcheck is disabled and does
not affect the boot.

I used pedantic mode, a 16-byte canary and a registry size of 6608.
On my system the overhead was 230 ms.
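
As a rough illustration of the intended developer workflow (a sketch only:
the helper name below is invented, and it assumes a build with
MCHECK_HEAP_PROTECTION defined so that the mcheck API from include/mcheck.h
is compiled in):

    #include <mcheck.h>

    /* Hypothetical debug helper: pedantic mode checks every registered
     * chunk on each malloc/free; NULL keeps the built-in abort handler.
     */
    static void debug_check_heap(void)
    {
            mcheck_pedantic(NULL);
            /* ... run the code under suspicion ... */
            mcheck_check_all();     /* force an immediate sweep of all live chunks */
    }
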
Committed by Tom Rini on 2024-04-12 16:23:19 -06:00

4 changed files with 488 additions and 25 deletions

common/board_f.c

@@ -718,6 +718,7 @@ static int reloc_bloblist(void)
return 0;
}
void mcheck_on_ramrelocation(size_t offset);
static int setup_reloc(void)
{
if (!(gd->flags & GD_FLG_SKIP_RELOC)) {
@@ -743,6 +744,9 @@ static int setup_reloc(void)
if (gd->flags & GD_FLG_SKIP_RELOC) {
debug("Skipping relocation due to flag\n");
} else {
#ifdef MCHECK_HEAP_PROTECTION
mcheck_on_ramrelocation(gd->reloc_off);
#endif
debug("Relocation Offset is: %08lx\n", gd->reloc_off);
debug("Relocating to %08lx, new gd at %08lx, sp at %08lx\n",
gd->relocaddr, (ulong)map_to_sysmem(gd->new_gd),

common/dlmalloc.c

@@ -32,6 +32,21 @@ void malloc_stats();
DECLARE_GLOBAL_DATA_PTR;
#ifdef MCHECK_HEAP_PROTECTION
#define STATIC_IF_MCHECK static
#undef MALLOC_COPY
#undef MALLOC_ZERO
static inline void MALLOC_ZERO(void *p, size_t sz) { memset(p, 0, sz); }
static inline void MALLOC_COPY(void *dest, const void *src, size_t sz) { memcpy(dest, src, sz); }
#else
#define STATIC_IF_MCHECK
#define mALLOc_impl mALLOc
#define fREe_impl fREe
#define rEALLOc_impl rEALLOc
#define mEMALIGn_impl mEMALIGn
#define cALLOc_impl cALLOc
#endif
/*
Emulation of sbrk for WIN32
All code within the ifdef WIN32 is untested by me.
@@ -1270,10 +1285,11 @@ static void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
*/
STATIC_IF_MCHECK
#if __STD_C
Void_t* mALLOc(size_t bytes)
Void_t* mALLOc_impl(size_t bytes)
#else
Void_t* mALLOc(bytes) size_t bytes;
Void_t* mALLOc_impl(bytes) size_t bytes;
#endif
{
mchunkptr victim; /* inspected/selected chunk */
@@ -1555,10 +1571,11 @@ Void_t* mALLOc(bytes) size_t bytes;
*/
STATIC_IF_MCHECK
#if __STD_C
void fREe(Void_t* mem)
void fREe_impl(Void_t* mem)
#else
void fREe(mem) Void_t* mem;
void fREe_impl(mem) Void_t* mem;
#endif
{
mchunkptr p; /* chunk corresponding to mem */
@@ -1696,10 +1713,11 @@ void fREe(mem) Void_t* mem;
*/
STATIC_IF_MCHECK
#if __STD_C
Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
Void_t* rEALLOc_impl(Void_t* oldmem, size_t bytes)
#else
Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
Void_t* rEALLOc_impl(oldmem, bytes) Void_t* oldmem; size_t bytes;
#endif
{
INTERNAL_SIZE_T nb; /* padded request size */
@@ -1725,7 +1743,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
#ifdef REALLOC_ZERO_BYTES_FREES
if (!bytes) {
fREe(oldmem);
fREe_impl(oldmem);
return NULL;
}
#endif
@@ -1733,7 +1751,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
if ((long)bytes < 0) return NULL;
/* realloc of null is supposed to be same as malloc */
if (oldmem == NULL) return mALLOc(bytes);
if (oldmem == NULL) return mALLOc_impl(bytes);
#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
@@ -1758,7 +1776,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
/* Note the extra SIZE_SZ overhead. */
if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
/* Must alloc, copy, free. */
newmem = mALLOc(bytes);
newmem = mALLOc_impl(bytes);
if (!newmem)
return NULL; /* propagate failure */
MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
@@ -1869,7 +1887,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
/* Must allocate */
newmem = mALLOc (bytes);
newmem = mALLOc_impl (bytes);
if (newmem == NULL) /* propagate failure */
return NULL;
@@ -1886,7 +1904,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
/* Otherwise copy, free, and exit */
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
fREe(oldmem);
fREe_impl(oldmem);
return newmem;
} else {
VALGRIND_RESIZEINPLACE_BLOCK(oldmem, 0, bytes, SIZE_SZ);
@@ -1905,7 +1923,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
set_inuse_bit_at_offset(remainder, remainder_size);
VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(remainder), remainder_size, SIZE_SZ,
false);
fREe(chunk2mem(remainder)); /* let free() deal with it */
fREe_impl(chunk2mem(remainder)); /* let free() deal with it */
}
else
{
@@ -1939,10 +1957,11 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
*/
STATIC_IF_MCHECK
#if __STD_C
Void_t* mEMALIGn(size_t alignment, size_t bytes)
Void_t* mEMALIGn_impl(size_t alignment, size_t bytes)
#else
Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
Void_t* mEMALIGn_impl(alignment, bytes) size_t alignment; size_t bytes;
#endif
{
INTERNAL_SIZE_T nb; /* padded request size */
@@ -1965,7 +1984,7 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
/* If need less alignment than we give anyway, just relay to malloc */
if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
if (alignment <= MALLOC_ALIGNMENT) return mALLOc_impl(bytes);
/* Otherwise, ensure that it is at least a minimum chunk size */
@@ -1974,7 +1993,7 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
/* Call malloc with worst case padding to hit alignment. */
nb = request2size(bytes);
m = (char*)(mALLOc(nb + alignment + MINSIZE));
m = (char*)(mALLOc_impl(nb + alignment + MINSIZE));
/*
* The attempt to over-allocate (with a size large enough to guarantee the
@@ -1990,7 +2009,7 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
* Use bytes not nb, since mALLOc internally calls request2size too, and
* each call increases the size to allocate, to account for the header.
*/
m = (char*)(mALLOc(bytes));
m = (char*)(mALLOc_impl(bytes));
/* Aligned -> return it */
if ((((unsigned long)(m)) % alignment) == 0)
return m;
@@ -1998,10 +2017,10 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
* Otherwise, try again, requesting enough extra space to be able to
* acquire alignment.
*/
fREe(m);
fREe_impl(m);
/* Add in extra bytes to match misalignment of unexpanded allocation */
extra = alignment - (((unsigned long)(m)) % alignment);
m = (char*)(mALLOc(bytes + extra));
m = (char*)(mALLOc_impl(bytes + extra));
/*
* m might not be the same as before. Validate that the previous value of
* extra still works for the current value of m.
@@ -2010,7 +2029,7 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
if (m) {
extra2 = alignment - (((unsigned long)(m)) % alignment);
if (extra2 > extra) {
fREe(m);
fREe_impl(m);
m = NULL;
}
}
@@ -2060,7 +2079,7 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
set_head(newp, newsize | PREV_INUSE);
set_inuse_bit_at_offset(newp, newsize);
set_head_size(p, leadsize);
fREe(chunk2mem(p));
fREe_impl(chunk2mem(p));
p = newp;
VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(p), bytes, SIZE_SZ, false);
@@ -2078,7 +2097,7 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
set_head_size(p, nb);
VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(remainder), remainder_size, SIZE_SZ,
false);
fREe(chunk2mem(remainder));
fREe_impl(chunk2mem(remainder));
}
check_inuse_chunk(p);
@@ -2126,10 +2145,11 @@ Void_t* pvALLOc(bytes) size_t bytes;
*/
STATIC_IF_MCHECK
#if __STD_C
Void_t* cALLOc(size_t n, size_t elem_size)
Void_t* cALLOc_impl(size_t n, size_t elem_size)
#else
Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
Void_t* cALLOc_impl(n, elem_size) size_t n; size_t elem_size;
#endif
{
mchunkptr p;
@@ -2145,7 +2165,7 @@ Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
INTERNAL_SIZE_T oldtopsize = chunksize(top);
#endif
#endif
Void_t* mem = mALLOc (sz);
Void_t* mem = mALLOc_impl (sz);
if ((long)n < 0) return NULL;
@@ -2205,6 +2225,90 @@ void cfree(mem) Void_t *mem;
#endif
#ifdef MCHECK_HEAP_PROTECTION
#include "mcheck_core.inc.h"
#if !__STD_C
#error "must have __STD_C"
#endif
Void_t *mALLOc(size_t bytes)
{
mcheck_pedantic_prehook();
size_t fullsz = mcheck_alloc_prehook(bytes);
void *p = mALLOc_impl(fullsz);
if (!p)
return p;
return mcheck_alloc_posthook(p, bytes);
}
void fREe(Void_t *mem) { fREe_impl(mcheck_free_prehook(mem)); }
Void_t *rEALLOc(Void_t *oldmem, size_t bytes)
{
mcheck_pedantic_prehook();
if (bytes == 0) {
if (oldmem)
fREe(oldmem);
return NULL;
}
if (oldmem == NULL)
return mALLOc(bytes);
void *p = mcheck_reallocfree_prehook(oldmem);
size_t newsz = mcheck_alloc_prehook(bytes);
p = rEALLOc_impl(p, newsz);
if (!p)
return p;
return mcheck_alloc_noclean_posthook(p, bytes);
}
Void_t *mEMALIGn(size_t alignment, size_t bytes)
{
mcheck_pedantic_prehook();
size_t fullsz = mcheck_memalign_prehook(alignment, bytes);
void *p = mEMALIGn_impl(alignment, fullsz);
if (!p)
return p;
return mcheck_memalign_posthook(alignment, p, bytes);
}
// pvALLOc and vALLOc redirect to mEMALIGn, which is wrapped here, so they need no wrapping of their own.
Void_t *cALLOc(size_t n, size_t elem_size)
{
mcheck_pedantic_prehook();
// NB: there is no overflow check on n * elem_size here (see the sketch after this function).
size_t fullsz = mcheck_alloc_prehook(n * elem_size);
void *p = cALLOc_impl(1, fullsz);
if (!p)
return p;
return mcheck_alloc_noclean_posthook(p, n * elem_size);
}
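// Editorial sketch (not part of this patch): the n * elem_size product above
// is unchecked; a size_t overflow guard before computing fullsz could look like:
//	if (elem_size && n > (size_t)-1 / elem_size)
//		return NULL;	/* n * elem_size would overflow size_t */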
// mcheck API {
int mcheck_pedantic(mcheck_abortfunc_t f)
{
mcheck_initialize(f, 1);
return 0;
}
int mcheck(mcheck_abortfunc_t f)
{
mcheck_initialize(f, 0);
return 0;
}
void mcheck_check_all(void) { mcheck_pedantic_check(); }
enum mcheck_status mprobe(void *__ptr) { return mcheck_mprobe(__ptr); }
// mcheck API }
#endif
/*

common/mcheck_core.inc.h (new file, 304 lines)

@@ -0,0 +1,304 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2024 Free Software Foundation, Inc.
* Written by Eugene Uriev, based on glibc 2.0 prototype of Mike Haertel.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
* <https://www.gnu.org/licenses/>
*/
/*
* TL;DR: this is a port of glibc mcheck into U-Boot.
*
* This file contains no entities for external linkage,
* so mcheck protection may be used in parallel, e.g. for "malloc_simple(..)" and "malloc(..)".
* To do so, the file can be included twice without linkage conflicts,
* i.e. the "core" part is shared as source, but not as a binary.
* Maybe some optimization here makes sense, to enable more binary sharing too,
* but currently I strive to keep it as simple as possible,
* and this programmers'-only mode doesn't pretend to be the main one.
*
* This library is aware of U-Boot specifics. It's also aware of ARM alignment concerns.
* Unlike glibc clients, U-Boot has limited malloc usage and only one thread,
* so it's better to make the protection heavier.
* Thus the overflow canary here is larger than glibc's, and the underflow canary is bigger too.
* U-Boot also allows using a fixed-size heap registry instead of glibc's doubly-linked list.
*
* Heavy canary allows to catch not only memset(..)-errors,
* but overflow/underflow of struct-array access:
* {
* struct mystruct* p = malloc(sizeof(struct mystruct) * N);
* p[-1].field1 = 0;
* p[N].field2 = 13;
* }
* TODO: In order to guarantee full coverage of that kind of error, a user could add variable-size
* canaries here, i.e. pre- and post-canaries with size >= requested_size
* (at the price of 3x heap usage). That would catch 100% of changes beyond
* an array for index (+1/-1) errors.
*
* U-Boot is a bootloader, not an OS with a C library. The library is activated not at
* runtime but at compile time, via the MCHECK_HEAP_PROTECTION macro. That guarantees
* that we haven't missed the first malloc.
*/
/*
* Testing
* This library has been successfully tested for U-Boot on a 64-bit ARM SoC.
* Proven in both default and pedantic mode: it confirms U-Boot to be clean and catches
* intentional test corruptions. Operation with malloc_trim has not been tested.
*/
#ifndef _MCHECKCORE_INC_H
#define _MCHECKCORE_INC_H 1
#include "mcheck.h"
#if defined(MCHECK_HEAP_PROTECTION)
#define mcheck_flood memset
// these are from /dev/random:
#define MAGICWORD 0x99ccf430fa562a05ULL
#define MAGICFREE 0x4875e63c0c6fc08eULL
#define MAGICTAIL 0x918dbcd7df78dcd6ULL
#define MALLOCFLOOD ((char)0xb6)
#define FREEFLOOD ((char)0xf5)
#define PADDINGFLOOD ((char)0x58)
// my normal run demands 4427-6449 chunks:
#define REGISTRY_SZ 6608
#define CANARY_DEPTH 2
// avoid problems with BSS at early stage:
static char mcheck_pedantic_flag __section(".data") = 0;
static void *mcheck_registry[REGISTRY_SZ] __section(".data") = {0};
static size_t mcheck_chunk_count __section(".data") = 0;
static size_t mcheck_chunk_count_max __section(".data") = 0;
typedef unsigned long long mcheck_elem;
typedef struct {
mcheck_elem elems[CANARY_DEPTH];
} mcheck_canary;
struct mcheck_hdr {
size_t size; /* Exact size requested by user. */
size_t aln_skip; /* Ignored bytes, before the mcheck_hdr, to fulfill alignment */
mcheck_canary canary; /* Magic number to check header integrity. */
};
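/*
 * Editorial sketch of the layout the hooks below produce for one protected
 * allocation (CANARY_DEPTH == 2, 8-byte mcheck_elem):
 *
 *  |<- aln_skip ->|<- struct mcheck_hdr ->|<- payload ->|<- padding ->|<- tail canary ->|
 *   (memalign only) size, aln_skip,         user data     PADDINGFLOOD   2 x MAGICTAIL
 *                   2 x MAGICWORD canary    (size bytes)   to 8-byte      (16 bytes)
 *                                                          grain
 *
 * The caller receives &hdr[1], i.e. a pointer to the payload.
 */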
static void mcheck_default_abort(enum mcheck_status status, const void *p)
{
const char *msg;
switch (status) {
case MCHECK_OK:
msg = "memory is consistent, library is buggy\n";
break;
case MCHECK_HEAD:
msg = "memory clobbered before allocated block\n";
break;
case MCHECK_TAIL:
msg = "memory clobbered past end of allocated block\n";
break;
case MCHECK_FREE:
msg = "block freed twice\n";
break;
default:
msg = "bogus mcheck_status, library is buggy\n";
break;
}
printf("\n\nmcheck: %p:%s!!! [%zu]\n\n", p, msg, mcheck_chunk_count_max);
}
static mcheck_abortfunc_t mcheck_abortfunc = &mcheck_default_abort;
static inline size_t allign_size_up(size_t sz, size_t grain)
{
return (sz + grain - 1) & ~(grain - 1);
}
#define mcheck_allign_customer_size(SZ) allign_size_up(SZ, sizeof(mcheck_elem))
#define mcheck_evaluate_memalign_prefix_size(ALIGN) allign_size_up(sizeof(struct mcheck_hdr), ALIGN)
static enum mcheck_status mcheck_OnNok(enum mcheck_status status, const void *p)
{
(*mcheck_abortfunc)(status, p);
return status;
}
static enum mcheck_status mcheck_checkhdr(const struct mcheck_hdr *hdr)
{
int i;
for (i = 0; i < CANARY_DEPTH; ++i)
if (hdr->canary.elems[i] == MAGICFREE)
return mcheck_OnNok(MCHECK_FREE, hdr + 1);
for (i = 0; i < CANARY_DEPTH; ++i)
if (hdr->canary.elems[i] != MAGICWORD)
return mcheck_OnNok(MCHECK_HEAD, hdr + 1);
const size_t payload_size = hdr->size;
const size_t payload_size_aligned = mcheck_allign_customer_size(payload_size);
const size_t padd_size = payload_size_aligned - hdr->size;
const char *payload = (const char *)&hdr[1];
for (i = 0; i < padd_size; ++i)
if (payload[payload_size + i] != PADDINGFLOOD)
return mcheck_OnNok(MCHECK_TAIL, hdr + 1);
const mcheck_canary *tail = (const mcheck_canary *)&payload[payload_size_aligned];
for (i = 0; i < CANARY_DEPTH; ++i)
if (tail->elems[i] != MAGICTAIL)
return mcheck_OnNok(MCHECK_TAIL, hdr + 1);
return MCHECK_OK;
}
enum { KEEP_CONTENT = 0, CLEAN_CONTENT, ANY_ALIGNMENT = 1 };
static void *mcheck_free_helper(void *ptr, int clean_content)
{
if (!ptr)
return ptr;
struct mcheck_hdr *hdr = &((struct mcheck_hdr *)ptr)[-1];
int i;
mcheck_checkhdr(hdr);
for (i = 0; i < CANARY_DEPTH; ++i)
hdr->canary.elems[i] = MAGICFREE;
if (clean_content)
mcheck_flood(ptr, FREEFLOOD, mcheck_allign_customer_size(hdr->size));
for (i = 0; i < REGISTRY_SZ; ++i)
if (mcheck_registry[i] == hdr) {
mcheck_registry[i] = 0;
break;
}
--mcheck_chunk_count;
return (char *)hdr - hdr->aln_skip;
}
static void *mcheck_free_prehook(void *ptr) { return mcheck_free_helper(ptr, CLEAN_CONTENT); }
static void *mcheck_reallocfree_prehook(void *ptr) { return mcheck_free_helper(ptr, KEEP_CONTENT); }
static size_t mcheck_alloc_prehook(size_t sz)
{
sz = mcheck_allign_customer_size(sz);
return sizeof(struct mcheck_hdr) + sz + sizeof(mcheck_canary);
}
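/*
 * Editorial worked example (64-bit build, CANARY_DEPTH == 2): for malloc(13)
 * the payload is rounded up to 16 bytes, so the underlying allocator is asked
 * for sizeof(struct mcheck_hdr) + 16 + sizeof(mcheck_canary)
 * = (8 + 8 + 16) + 16 + 16 = 64 bytes; the 3 bytes between offsets 13 and 16
 * of the payload are later flooded with PADDINGFLOOD.
 */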
static void *mcheck_allocated_helper(void *altoghether_ptr, size_t customer_sz,
size_t alignment, int clean_content)
{
const size_t slop = alignment ?
mcheck_evaluate_memalign_prefix_size(alignment) - sizeof(struct mcheck_hdr) : 0;
struct mcheck_hdr *hdr = (struct mcheck_hdr *)((char *)altoghether_ptr + slop);
int i;
hdr->size = customer_sz;
hdr->aln_skip = slop;
for (i = 0; i < CANARY_DEPTH; ++i)
hdr->canary.elems[i] = MAGICWORD;
char *payload = (char *)&hdr[1];
if (clean_content)
mcheck_flood(payload, MALLOCFLOOD, customer_sz);
const size_t customer_size_aligned = mcheck_allign_customer_size(customer_sz);
mcheck_flood(payload + customer_sz, PADDINGFLOOD, customer_size_aligned - customer_sz);
mcheck_canary *tail = (mcheck_canary *)&payload[customer_size_aligned];
for (i = 0; i < CANARY_DEPTH; ++i)
tail->elems[i] = MAGICTAIL;
++mcheck_chunk_count;
if (mcheck_chunk_count > mcheck_chunk_count_max)
mcheck_chunk_count_max = mcheck_chunk_count;
for (i = 0; i < REGISTRY_SZ; ++i)
if (!mcheck_registry[i]) {
mcheck_registry[i] = hdr;
return payload; // normal end
}
static char *overflow_msg = "\n\n\nERROR: mcheck registry overflow, pedantic check would be incomplete!!\n\n\n\n";
printf("%s", overflow_msg);
overflow_msg = "(mcheck registry full)";
return payload;
}
static void *mcheck_alloc_posthook(void *altoghether_ptr, size_t customer_sz)
{
return mcheck_allocated_helper(altoghether_ptr, customer_sz, ANY_ALIGNMENT, CLEAN_CONTENT);
}
static void *mcheck_alloc_noclean_posthook(void *altoghether_ptr, size_t customer_sz)
{
return mcheck_allocated_helper(altoghether_ptr, customer_sz, ANY_ALIGNMENT, KEEP_CONTENT);
}
static size_t mcheck_memalign_prehook(size_t alig, size_t sz)
{
return mcheck_evaluate_memalign_prefix_size(alig) + sz + sizeof(mcheck_canary);
}
static void *mcheck_memalign_posthook(size_t alignment, void *altoghether_ptr, size_t customer_sz)
{
return mcheck_allocated_helper(altoghether_ptr, customer_sz, alignment, CLEAN_CONTENT);
}
static enum mcheck_status mcheck_mprobe(void *ptr)
{
struct mcheck_hdr *hdr = &((struct mcheck_hdr *)ptr)[-1];
return mcheck_checkhdr(hdr);
}
static void mcheck_pedantic_check(void)
{
int i;
for (i = 0; i < REGISTRY_SZ; ++i)
if (mcheck_registry[i])
mcheck_checkhdr(mcheck_registry[i]);
}
static void mcheck_pedantic_prehook(void)
{
if (mcheck_pedantic_flag)
mcheck_pedantic_check();
}
static void mcheck_initialize(mcheck_abortfunc_t new_func, char pedantic_flag)
{
mcheck_abortfunc = (new_func) ? new_func : &mcheck_default_abort;
mcheck_pedantic_flag = pedantic_flag;
}
void mcheck_on_ramrelocation(size_t offset)
{
char *p;
int i;
// Simple, but inaccurate strategy: drop the pre-reloc heap
for (i = 0; i < REGISTRY_SZ; ++i)
if ((p = mcheck_registry[i]) != NULL ) {
printf("mcheck, WRN: forgetting %p chunk\n", p);
mcheck_registry[i] = 0;
}
mcheck_chunk_count = 0;
}
#endif
#endif

include/mcheck.h (new file, 51 lines)

@@ -0,0 +1,51 @@
/* SPDX-License-Identifier: LGPL-2.1+ */
/*
* Copyright (C) 1996-2024 Free Software Foundation, Inc.
* This file is part of the GNU C Library.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* <https://www.gnu.org/licenses/>.
*/
#ifndef _MCHECK_H
#define _MCHECK_H 1
/*
* Return values for `mprobe': these are the kinds of inconsistencies that
* `mcheck' enables detection of.
*/
enum mcheck_status {
MCHECK_DISABLED = -1, /* Consistency checking is not turned on. */
MCHECK_OK, /* Block is fine. */
MCHECK_FREE, /* Block freed twice. */
MCHECK_HEAD, /* Memory before the block was clobbered. */
MCHECK_TAIL /* Memory after the block was clobbered. */
};
typedef void (*mcheck_abortfunc_t)(enum mcheck_status, const void *p);
int mcheck(mcheck_abortfunc_t func);
/*
* Similar to `mcheck' but performs checks for all blocks whenever one of
* the memory handling functions is called. This can be very slow.
*/
int mcheck_pedantic(mcheck_abortfunc_t f);
/* Force check of all blocks now. */
void mcheck_check_all(void);
/*
* Check for aberrations in a particular malloc'd block. These are the
* same checks that `mcheck' does, when you free or reallocate a block.
*/
enum mcheck_status mprobe(void *__ptr);
#endif
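
For reference, here is a minimal usage sketch of this header with a custom
report function (the function and variable names are invented for the
example, and printf via stdio.h is assumed to be available as usual in
U-Boot code):

    #include <mcheck.h>
    #include <stdio.h>

    /* Hypothetical handler: log the fault instead of the default printout. */
    static void report_heap_fault(enum mcheck_status status, const void *p)
    {
            printf("heap fault %d at block %p\n", (int)status, p);
    }

    void check_one_block(void *block)
    {
            mcheck(&report_heap_fault);     /* install handler, non-pedantic mode */
            if (mprobe(block) != MCHECK_OK) /* probe a single malloc'd block */
                    printf("block %p is corrupted\n", block);
    }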