
Commit 2a1923e

Merged latest, 2019-09-07, from upstream umm_malloc

* R.Hempel 2019-09-07 - Separate the malloc() and free() functionality into
*                       wrappers that use critical section protection macros
*                       and static core functions that assume they are
*                       running in a protected context. Thanks @devyte
1 parent 5ded134 commit 2a1923e
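The core of the change is a split between thin public wrappers and static *_core functions: the wrappers keep the argument checks and the critical-section bracketing, while the core functions do the actual heap work and assume the lock is already held. A minimal, host-compilable sketch of that pattern follows; the UMM_CRITICAL_* definitions and the bump-allocator core below are stand-ins for illustration only, not the real ESP8266 implementations.

/* sketch.c - illustrative only; stand-in macros, not the real umm_malloc code */
#include <stddef.h>
#include <stdio.h>

#define UMM_CRITICAL_DECL(tag)  int _saved_ps_##tag          /* real build: saved PS word   */
#define UMM_CRITICAL_ENTRY(tag) (_saved_ps_##tag = 1)        /* real build: xt_rsil(...)    */
#define UMM_CRITICAL_EXIT(tag)  (void)(_saved_ps_##tag = 0)  /* real build: xt_wsr_ps(...)  */

/* Core function: assumes the caller already holds the critical section. */
static void *umm_malloc_core_sketch( size_t size ) {
  static unsigned char heap[1024];
  static size_t brk = 0;
  if ( brk + size > sizeof(heap) ) {
    return NULL;                       /* out of memory */
  }
  void *p = &heap[brk];
  brk += size;                         /* toy bump allocator, not umm's block search */
  return p;
}

/* Wrapper: trivial cases handled outside the lock, real work inside it. */
void *umm_malloc_sketch( size_t size ) {
  UMM_CRITICAL_DECL(id_malloc);
  void *ptr = NULL;

  if ( 0 == size ) {                   /* nothing to allocate, no lock taken */
    return ptr;
  }

  UMM_CRITICAL_ENTRY(id_malloc);       /* mask interrupts / take the lock */
  ptr = umm_malloc_core_sketch( size );
  UMM_CRITICAL_EXIT(id_malloc);        /* restore the previous state */

  return ptr;
}

int main( void ) {
  printf( "allocated %p\n", umm_malloc_sketch( 16 ) );
  return 0;
}

The payoff, visible in the umm_realloc() hunks further down, is that code which already holds the critical section can call the core functions directly instead of re-entering the lock through the public API.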

File tree

5 files changed, +131 -80 lines changed


cores/esp8266/umm_malloc/umm_info.c

Lines changed: 4 additions & 1 deletion
@@ -1,4 +1,5 @@
 #if defined(BUILD_UMM_MALLOC_C)
+
 #ifdef UMM_INFO
 
 /* ----------------------------------------------------------------------------
@@ -36,6 +37,7 @@ void *umm_info( void *ptr, int force ) {
    */
   memset( &ummHeapInfo, 0, sizeof( ummHeapInfo ) );
 
+  DBGLOG_FORCE( force, "\n" );
   DBGLOG_FORCE( force, "+----------+-------+--------+--------+-------+--------+--------+\n" );
   DBGLOG_FORCE( force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5d|NF %5d|PF %5d|\n",
     (unsigned long)(&UMM_BLOCK(blockNo)),
@@ -204,4 +206,5 @@ size_t umm_free_heap_size_lw( void ) {
   return (size_t)ummStats.free_blocks * sizeof(umm_block);
 }
 #endif
-#endif
+
+#endif // defined(BUILD_UMM_MALLOC_C)
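The guard reshuffle above (mirrored in umm_integrity.c and umm_poison.c below) makes the whole file compile to nothing unless BUILD_UMM_MALLOC_C is defined. That matches how these .c files are consumed: they are textually included into umm_malloc.cpp rather than built as separate translation units. Roughly, and as an assumed sketch of the build arrangement rather than a verbatim copy of the repo:

/* umm_malloc.cpp (assumed arrangement, heavily abbreviated) */
#define BUILD_UMM_MALLOC_C      /* marks the one legitimate inclusion point       */

/* ... allocator implementation: umm_init(), umm_malloc(), umm_free(), ...       */

#include "umm_info.c"           /* each included file now wraps its entire body   */
#include "umm_integrity.c"      /* in #if defined(BUILD_UMM_MALLOC_C) ... #endif, */
#include "umm_poison.c"         /* so compiling it standalone produces nothing    */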

cores/esp8266/umm_malloc/umm_integrity.c

Lines changed: 3 additions & 1 deletion
@@ -1,5 +1,6 @@
+#if defined(BUILD_UMM_MALLOC_C)
 /* integrity check (UMM_INTEGRITY_CHECK) {{{ */
-#if defined(UMM_INTEGRITY_CHECK) && defined(BUILD_UMM_MALLOC_C)
+#if defined(UMM_INTEGRITY_CHECK)
 /*
  * Perform integrity check of the whole heap data. Returns 1 in case of
  * success, 0 otherwise.
@@ -130,3 +131,4 @@ int umm_integrity_check(void) {
 
 #endif
 /* }}} */
+#endif // defined(BUILD_UMM_MALLOC_C)

cores/esp8266/umm_malloc/umm_malloc.cpp

Lines changed: 88 additions & 63 deletions
@@ -24,6 +24,10 @@
  * - Move integrity and poison checking to separate file
  * R.Hempel 2017-12-29 - Fix bug in realloc when requesting a new block that
  *                       results in OOM error - see Issue 11
+ * R.Hempel 2019-09-07 - Separate the malloc() and free() functionality into
+ *                       wrappers that use critical section protection macros
+ *                       and static core functions that assume they are
+ *                       running in a protected con text. Thanks @devyte
  * ----------------------------------------------------------------------------
  */
 
@@ -367,22 +371,15 @@ void umm_init( void ) {
   }
 }
 
-/* ------------------------------------------------------------------------ */
+/* ------------------------------------------------------------------------
+ * Must be called only from within critical sections guarded by
+ * UMM_CRITICAL_ENTRY() and UMM_CRITICAL_EXIT().
+ */
 
-void umm_free( void *ptr ) {
-  UMM_CRITICAL_DECL(id_free);
+static void umm_free_core( void *ptr ) {
 
   unsigned short int c;
 
-  /* If we're being asked to free a NULL pointer, well that's just silly! */
-
-  if( (void *)0 == ptr ) {
-    DBGLOG_DEBUG( "free a null pointer -> do nothing\n" );
-    STATS__NULL_FREE_REQUEST(id_free);
-
-    return;
-  }
-
   STATS__FREE_REQUEST(id_free);
   /*
   */
@@ -399,9 +396,6 @@ void umm_free( void *ptr ) {
 
   DBGLOG_DEBUG( "Freeing block %6d\n", c );
 
-  /* Protect the critical section... */
-  UMM_CRITICAL_ENTRY(id_free);
-
   /* Update stats Free Block count */
   STATS__FREE_BLOCKS_UPDATE(UMM_NBLOCK(c) - c);
 
@@ -431,50 +425,53 @@ void umm_free( void *ptr ) {
 
     UMM_NBLOCK(c) |= UMM_FREELIST_MASK;
   }
-
-  /* Release the critical section... */
-  UMM_CRITICAL_EXIT(id_free);
 }
 
 /* ------------------------------------------------------------------------ */
 
-void *umm_malloc( size_t size ) {
-  UMM_CRITICAL_DECL(id_malloc);
+void umm_free( void *ptr ) {
+  UMM_CRITICAL_DECL(id_free);
 
-  unsigned short int blocks;
-  unsigned short int blockSize = 0;
+  if (umm_heap == NULL) {
+    umm_init();
+  }
 
-  unsigned short int bestSize;
-  unsigned short int bestBlock;
+  /* If we're being asked to free a NULL pointer, well that's just silly! */
 
-  unsigned short int cf;
+  if( (void *)0 == ptr ) {
+    DBGLOG_DEBUG( "free a null pointer -> do nothing\n" );
+    STATS__NULL_FREE_REQUEST(id_free);
 
-  if (umm_heap == NULL) {
-    umm_init();
+    return;
   }
 
-  /*
-   * the very first thing we do is figure out if we're being asked to allocate
-   * a size of 0 - and if we are we'll simply return a null pointer. if not
-   * then reduce the size by 1 byte so that the subsequent calculations on
-   * the number of blocks to allocate are easier...
-   */
+  /* Free the memory withing a protected critical section */
 
+  UMM_CRITICAL_ENTRY(id_free);
 
-  if( 0 == size ) {
-    DBGLOG_DEBUG( "malloc a block of 0 bytes -> do nothing\n" );
-    STATS__ZERO_ALLOC_REQUEST(id_malloc, size);
+  umm_free_core( ptr );
 
-    return( (void *)NULL );
-  }
+  UMM_CRITICAL_EXIT(id_free);
+}
+
+/* ------------------------------------------------------------------------
+ * Must be called only from within critical sections guarded by
+ * UMM_CRITICAL_ENTRY() and UMM_CRITICAL_EXIT().
+ */
+
+static void *umm_malloc_core( size_t size ) {
+  unsigned short int blocks;
+  unsigned short int blockSize = 0;
+
+  unsigned short int bestSize;
+  unsigned short int bestBlock;
+
+  unsigned short int cf;
 
   STATS__ALLOC_REQUEST(id_malloc, size);
 
   blocks = umm_blocks( size );
 
-  /* Protect the critical section... */
-  UMM_CRITICAL_ENTRY(id_malloc);
-
   /*
    * Now we can scan through the free list until we find a space that's big
    * enough to hold the number of blocks we need.
@@ -566,16 +563,46 @@ void *umm_malloc( size_t size ) {
 
     DBGLOG_DEBUG( "Can't allocate %5d blocks\n", blocks );
 
-    /* Release the critical section... */
-    UMM_CRITICAL_EXIT(id_malloc);
-
     return( (void *)NULL );
   }
 
-  /* Release the critical section... */
+  return( (void *)&UMM_DATA(cf) );
+}
+
+/* ------------------------------------------------------------------------ */
+
+void *umm_malloc( size_t size ) {
+  UMM_CRITICAL_DECL(id_malloc);
+
+  void *ptr = NULL;
+
+  if (umm_heap == NULL) {
+    umm_init();
+  }
+
+  /*
+   * the very first thing we do is figure out if we're being asked to allocate
+   * a size of 0 - and if we are we'll simply return a null pointer. if not
+   * then reduce the size by 1 byte so that the subsequent calculations on
+   * the number of blocks to allocate are easier...
+   */
+
+  if( 0 == size ) {
+    DBGLOG_DEBUG( "malloc a block of 0 bytes -> do nothing\n" );
+    STATS__ZERO_ALLOC_REQUEST(id_malloc, size);
+
+    return( ptr );
+  }
+
+  /* Allocate the memory withing a protected critical section */
+
+  UMM_CRITICAL_ENTRY(id_malloc);
+
+  ptr = umm_malloc_core( size );
+
   UMM_CRITICAL_EXIT(id_malloc);
 
-  return( (void *)&UMM_DATA(cf) );
+  return( ptr );
 }
 
 /* ------------------------------------------------------------------------ */
@@ -724,7 +751,7 @@ void *umm_realloc( void *ptr, size_t size ) {
 #ifdef UMM_LIGHTWEIGHT_CPU
     if ((prevBlockSize + blockSize + nextBlockSize) > blocks) {
       umm_split_block( c, blocks, 0 );
-      umm_free( (void *)&UMM_DATA(c+blocks) );
+      umm_free_core( (void *)&UMM_DATA(c+blocks) );
     }
     STATS__FREE_BLOCKS_ISR_MIN();
     blockSize = blocks;
@@ -736,19 +763,18 @@ void *umm_realloc( void *ptr, size_t size ) {
     ptr = (void *)&UMM_DATA(c);
     UMM_CRITICAL_RESUME(id_realloc);
   } else {
-    UMM_CRITICAL_SUSPEND(id_realloc);
     DBGLOG_DEBUG( "realloc a completely new block %i\n", blocks );
     void *oldptr = ptr;
-    if( (ptr = umm_malloc( size )) ) {
+    if( (ptr = umm_malloc_core( size )) ) {
       DBGLOG_DEBUG( "realloc %i to a bigger block %i, copy, and free the old\n", blockSize, blocks );
+      UMM_CRITICAL_SUSPEND(id_realloc);
       memcpy( ptr, oldptr, curSize );
-      umm_free( oldptr );
-      blockSize = blocks;
       UMM_CRITICAL_RESUME(id_realloc);
+      umm_free_core( oldptr );
+      blockSize = blocks;
     } else {
       DBGLOG_DEBUG( "realloc %i to a bigger block %i failed - return NULL and leave the old block!\n", blockSize, blocks );
       /* This space intentionally left blnk */
-      UMM_CRITICAL_RESUME(id_realloc);
       STATS__OOM_UPDATE();
     }
   }
@@ -794,7 +820,7 @@ void *umm_realloc( void *ptr, size_t size ) {
 #ifdef UMM_LIGHTWEIGHT_CPU
     if (blockSize > blocks) {
       umm_split_block( c, blocks, 0 );
-      umm_free( (void *)&UMM_DATA(c+blocks) );
+      umm_free_core( (void *)&UMM_DATA(c+blocks) );
     }
     STATS__FREE_BLOCKS_ISR_MIN();
     blockSize = blocks;
@@ -813,19 +839,18 @@ void *umm_realloc( void *ptr, size_t size ) {
       STATS__FREE_BLOCKS_UPDATE(-nextBlockSize);
       blockSize += nextBlockSize;
     } else { // 4
-      UMM_CRITICAL_SUSPEND(id_realloc);
       DBGLOG_DEBUG( "realloc a completely new block %d\n", blocks );
       void *oldptr = ptr;
-      if( (ptr = umm_malloc( size )) ) {
+      if( (ptr = umm_malloc_core( size )) ) {
         DBGLOG_DEBUG( "realloc %d to a bigger block %d, copy, and free the old\n", blockSize, blocks );
+        UMM_CRITICAL_SUSPEND(id_realloc);
         memcpy( ptr, oldptr, curSize );
-        umm_free( oldptr);
-        blockSize = blocks;
         UMM_CRITICAL_RESUME(id_realloc);
+        umm_free_core( oldptr);
+        blockSize = blocks;
       } else {
         DBGLOG_DEBUG( "realloc %d to a bigger block %d failed - return NULL and leave the old block!\n", blockSize, blocks );
         /* This space intentionally left blnk */
-        UMM_CRITICAL_RESUME(id_realloc);
         STATS__OOM_UPDATE();
       }
     }
@@ -836,15 +861,15 @@ void *umm_realloc( void *ptr, size_t size ) {
     DBGLOG_DEBUG( "realloc the same or smaller size block - %d, do nothing\n", blocks );
     /* This space intentionally left blank */
   } else {
-    UMM_CRITICAL_SUSPEND(id_realloc);
     DBGLOG_DEBUG( "realloc a completely new block %d\n", blocks );
     void *oldptr = ptr;
-    if( (ptr = umm_malloc( size )) ) {
+    if( (ptr = umm_malloc_core( size )) ) {
       DBGLOG_DEBUG( "realloc %d to a bigger block %d, copy, and free the old\n", blockSize, blocks );
+      UMM_CRITICAL_SUSPEND(id_realloc);
       memcpy( ptr, oldptr, curSize );
-      umm_free( oldptr );
-      blockSize = blocks;
       UMM_CRITICAL_RESUME(id_realloc);
+      umm_free_core( oldptr );
+      blockSize = blocks;
     } else {
       DBGLOG_DEBUG( "realloc %d to a bigger block %d failed - return NULL and leave the old block!\n", blockSize, blocks );
       /* This space intentionally left blnk */
@@ -860,7 +885,7 @@ void *umm_realloc( void *ptr, size_t size ) {
   if (blockSize > blocks ) {
     DBGLOG_DEBUG( "split and free %d blocks from %d\n", blocks, blockSize );
     umm_split_block( c, blocks, 0 );
-    umm_free( (void *)&UMM_DATA(c+blocks) );
+    umm_free_core( (void *)&UMM_DATA(c+blocks) );
   }
 
   STATS__FREE_BLOCKS_MIN();
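All of the umm_realloc() hunks above share one shape: the function already holds the critical section, so it now calls umm_malloc_core()/umm_free_core() directly instead of the locking wrappers, and the lock is suspended only around the potentially long memcpy(). Condensed to its skeleton, with the block-size analysis and stats updates omitted:

/* Inside umm_realloc(), already under UMM_CRITICAL_ENTRY(id_realloc): */
void *oldptr = ptr;
if( (ptr = umm_malloc_core( size )) ) {        /* lock is held: use the core    */
    UMM_CRITICAL_SUSPEND(id_realloc);          /* let interrupts run during the */
    memcpy( ptr, oldptr, curSize );            /* bulk copy                     */
    UMM_CRITICAL_RESUME(id_realloc);
    umm_free_core( oldptr );                   /* back under the lock           */
    blockSize = blocks;
} else {
    STATS__OOM_UPDATE();                       /* old block stays valid         */
}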

cores/esp8266/umm_malloc/umm_malloc_cfg.h

Lines changed: 32 additions & 13 deletions
@@ -55,11 +55,20 @@ extern "C" {
  * ----------------------------------------------------------------------------
  */
 
+#ifdef TEST_BUILD
+extern char test_umm_heap[];
+#endif
 
+#ifdef TEST_BUILD
+/* Start addresses and the size of the heap */
+#define UMM_MALLOC_CFG_HEAP_ADDR (test_umm_heap)
+#define UMM_MALLOC_CFG_HEAP_SIZE 0x10000
+#else
 /* Start addresses and the size of the heap */
 extern char _heap_start[];
 #define UMM_MALLOC_CFG_HEAP_ADDR ((uint32_t)&_heap_start[0])
 #define UMM_MALLOC_CFG_HEAP_SIZE ((size_t)(0x3fffc000 - UMM_MALLOC_CFG_HEAP_ADDR))
+#endif
 
 /* A couple of macros to make packing structures less compiler dependent */
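The TEST_BUILD override above points the allocator at a plain 64 KB array instead of the linker-provided _heap_start region, which makes it possible to compile and exercise umm_malloc on a host machine. A hypothetical harness, where only test_umm_heap and the 0x10000 size come from the diff and everything else is illustrative:

/* host_test.c - hypothetical; build the umm_malloc sources with -DTEST_BUILD */
#include <assert.h>
#include <stddef.h>

char test_umm_heap[0x10000];          /* backs UMM_MALLOC_CFG_HEAP_ADDR/SIZE above */

extern void  umm_init( void );
extern void *umm_malloc( size_t size );
extern void  umm_free( void *ptr );

int main( void ) {
  umm_init();
  void *p = umm_malloc( 128 );
  assert( p != NULL );                /* a fresh 64 KB heap can satisfy 128 bytes */
  umm_free( p );
  return 0;
}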

@@ -336,17 +345,29 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
  * called from within umm_malloc()
  */
 
-#if defined(UMM_CRITICAL_METRICS)
-#define UMM_CRITICAL_DECL(tag) uint32_t _saved_ps_##tag
-#define UMM_CRITICAL_ENTRY(tag)_critical_entry(&time_stats.tag, &_saved_ps_##tag)
-#define UMM_CRITICAL_EXIT(tag) _critical_exit(&time_stats.tag, &_saved_ps_##tag)
-
-#else // ! UMM_CRITICAL_METRICS
-// This method preserves the intlevel on entry and restores the
-// original intlevel at exit.
-#define UMM_CRITICAL_DECL(tag) uint32_t _saved_ps_##tag
-#define UMM_CRITICAL_ENTRY(tag) _saved_ps_##tag = xt_rsil(DEFAULT_CRITICAL_SECTION_INTLEVEL)
-#define UMM_CRITICAL_EXIT(tag) xt_wsr_ps(_saved_ps_##tag)
+#ifdef TEST_BUILD
+extern int umm_critical_depth;
+extern int umm_max_critical_depth;
+#define UMM_CRITICAL_ENTRY() {\
+    ++umm_critical_depth; \
+    if (umm_critical_depth > umm_max_critical_depth) { \
+        umm_max_critical_depth = umm_critical_depth; \
+    } \
+}
+#define UMM_CRITICAL_EXIT() (umm_critical_depth--)
+#else
+#if defined(UMM_CRITICAL_METRICS)
+#define UMM_CRITICAL_DECL(tag) uint32_t _saved_ps_##tag
+#define UMM_CRITICAL_ENTRY(tag)_critical_entry(&time_stats.tag, &_saved_ps_##tag)
+#define UMM_CRITICAL_EXIT(tag) _critical_exit(&time_stats.tag, &_saved_ps_##tag)
+
+#else // ! UMM_CRITICAL_METRICS
+// This method preserves the intlevel on entry and restores the
+// original intlevel at exit.
+#define UMM_CRITICAL_DECL(tag) uint32_t _saved_ps_##tag
+#define UMM_CRITICAL_ENTRY(tag) _saved_ps_##tag = xt_rsil(DEFAULT_CRITICAL_SECTION_INTLEVEL)
+#define UMM_CRITICAL_EXIT(tag) xt_wsr_ps(_saved_ps_##tag)
+#endif
 #endif
 
 /*
@@ -376,11 +397,9 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
 #ifdef UMM_LIGHTWEIGHT_CPU
 #define UMM_CRITICAL_SUSPEND(tag) UMM_CRITICAL_EXIT(tag)
 #define UMM_CRITICAL_RESUME(tag) UMM_CRITICAL_ENTRY(tag)
-#define UMM_NEED_LOCK_LW_CPU true
 #else
 #define UMM_CRITICAL_SUSPEND(tag) do {} while(0)
 #define UMM_CRITICAL_RESUME(tag) do {} while(0)
-#define UMM_NEED_LOCK_LW_CPU true
 #endif
 
 /*
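Under TEST_BUILD the critical-section macros above never touch interrupt state; they only count nesting depth, so a host test can verify that every UMM_CRITICAL_ENTRY() was balanced by an UMM_CRITICAL_EXIT(). A hypothetical check, where the two counters come from the diff and the assertions are illustrative:

/* After running an allocation-heavy TEST_BUILD test: */
#include <assert.h>

extern int umm_critical_depth;        /* incremented on ENTRY, decremented on EXIT */
extern int umm_max_critical_depth;    /* deepest nesting observed during the run   */

void check_critical_balance( void ) {
  assert( umm_critical_depth == 0 );      /* all critical sections were closed     */
  assert( umm_max_critical_depth >= 1 );  /* the core paths really took the lock   */
}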

cores/esp8266/umm_malloc/umm_poison.c

Lines changed: 4 additions & 2 deletions
@@ -1,5 +1,6 @@
-/* poisoning (UMM_POISON_CHECK) {{{ */
 #if defined(BUILD_UMM_MALLOC_C)
+
+/* poisoning (UMM_POISON_CHECK) {{{ */
 #if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
 #define POISON_BYTE (0xa5)
 
@@ -236,4 +237,5 @@ int umm_poison_check(void) {
 /* ------------------------------------------------------------------------ */
 
 #endif
-#endif
+
+#endif // defined(BUILD_UMM_MALLOC_C)