/**
 * @file
 * Dynamic memory manager
 *
 * This is a lightweight replacement for the standard C library malloc().
 *
 * If you want to use the standard C library malloc() instead, define
 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
 *
 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
 * MEM_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
 * of pools like this (more pools can be added between _START and _END):
 *
 * Define three pools with sizes 256, 512, and 1512 bytes
 * LWIP_MALLOC_MEMPOOL_START
 * LWIP_MALLOC_MEMPOOL(20, 256)
 * LWIP_MALLOC_MEMPOOL(10, 512)
 * LWIP_MALLOC_MEMPOOL(5, 1512)
 * LWIP_MALLOC_MEMPOOL_END
 */

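/* For illustration, a minimal lwipopts.h excerpt that selects the pool-based
 * allocator described above might look like the sketch below. The option
 * names are the ones referenced in this file's header comment; the exact
 * contents of a real lwipopts.h depend on your port. */
#if 0 /* illustrative sketch, not compiled */
#define MEM_LIBC_MALLOC       0  /* keep lwIP's own allocator, don't use libc malloc() */
#define MEM_USE_POOLS         1  /* back mem_malloc() with fixed-size pools */
#define MEM_USE_CUSTOM_POOLS  1  /* pool sizes are listed in "lwippools.h" */
#endif
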
/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *         Simon Goldschmidt
 *
 */

#include "lwip/opt.h"

#if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */

#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/sys.h"
#include "lwip/stats.h"

#include <string.h>

#if MEM_USE_POOLS
/* lwIP heap implemented with different sized pools */

/**
 * This structure is used to save the pool one element came from.
 */
struct mem_helper
{
  memp_t poolnr;
};

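/*
 * Layout of a pool-backed allocation (illustration):
 *
 *   |<- struct mem_helper ->|<- user data ...
 *   ^                       ^
 *   element as returned     pointer handed back
 *   by memp_malloc()        to the caller
 *
 * mem_malloc() below stores the pool number in the helper and returns the
 * address just past it; mem_free() steps the pointer back to recover it.
 */
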
/**
 * Allocate memory: determine the smallest pool that is big enough
 * to contain an element of 'size' and get an element from that pool.
 *
 * @param size the size in bytes of the memory needed
 * @return a pointer to the allocated memory or NULL if the pool is empty
 */
void *
mem_malloc(mem_size_t size)
{
  struct mem_helper *element;
  memp_t poolnr;

  for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr++) {
    /* is this pool big enough to hold an element of the required size
       plus a struct mem_helper that saves the pool this element came from? */
    if ((size + sizeof(struct mem_helper)) <= memp_sizes[poolnr]) {
      break;
    }
  }
  if (poolnr > MEMP_POOL_LAST) {
    LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
    return NULL;
  }
  element = (struct mem_helper*)memp_malloc(poolnr);
  if (element == NULL) {
    /* No need to DEBUGF or ASSERT: This error is already
       taken care of in memp.c */
    /** @todo: we could try a bigger pool if this one is empty! */
    return NULL;
  }

  /* save the pool number this element came from */
  element->poolnr = poolnr;
  /* and return a pointer to the memory directly after the struct mem_helper */
  element++;

  return element;
}

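/* A round-trip usage sketch (illustrative only; 'example' is a hypothetical
 * caller): the caller never sees the mem_helper prefix and must release the
 * block with the matching mem_free() below. */
#if 0 /* illustrative sketch, not compiled */
static void example(void)
{
  void *buf = mem_malloc(128);  /* served from the smallest pool >= 128 + helper */
  if (buf != NULL) {
    /* ... use buf ... */
    mem_free(buf);              /* returns the element to its saved pool */
  }
}
#endif
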
/**
 * Free memory previously allocated by mem_malloc. Loads the pool number
 * and calls memp_free with that pool number to put the element back into
 * its pool
 *
 * @param rmem the memory element to free
 */
void
mem_free(void *rmem)
{
  struct mem_helper *hmem = (struct mem_helper*)rmem;

  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));

  /* get the original struct mem_helper */
  hmem--;

  LWIP_ASSERT("hmem != NULL", (hmem != NULL));
  LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
  LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));

  /* and put it in the pool we saved earlier */
  memp_free(hmem->poolnr, hmem);
}

#else /* MEM_USE_POOLS */
/* lwIP replacement for your libc malloc() */

/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned since for getting its size,
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
};

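/*
 * Heap layout (illustration): 'next' and 'prev' are byte offsets into ram[],
 * not pointers, so the struct mem at offset p is reached as
 * (struct mem *)&ram[p]. A block's user data starts SIZEOF_STRUCT_MEM bytes
 * after its header, and its size is mem->next - p - SIZEOF_STRUCT_MEM
 * (see mem_realloc() below).
 *
 *   ram                                            ram_end
 *   |[hdr|data ...][hdr|data ...][hdr|data ...] ...[hdr]|
 *    p0             p1 = hdr0.next                  offset MEM_SIZE_ALIGNED
 */
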
/** All allocated blocks will be MIN_SIZE bytes big, at least!
 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
 * larger values help prevent many tiny blocks from fragmenting the RAM. */
#ifndef MIN_SIZE
#define MIN_SIZE             12
#endif /* MIN_SIZE */
/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM    LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MEM_SIZE)

/** the heap. we need one struct mem at the end and some room for alignment */
static u8_t ram_heap[MEM_SIZE_ALIGNED + (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT];
/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;
/** pointer to the lowest free block, this is used for faster search */
static struct mem *lfree;
/** concurrent access protection */
static sys_sem_t mem_sem;

/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function is through, there should not exist
 * one empty struct mem pointing to another empty struct mem.
 *
 * @param mem this points to a struct mem which just has been freed
 * @internal this function is only called by mem_free() and mem_realloc()
 *
 * This assumes access to the heap is protected by the calling function
 * already.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = (struct mem *)&ram[mem->next];
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      lfree = mem;
    }
    mem->next = nmem->next;
    ((struct mem *)&ram[nmem->next])->prev = (u8_t *)mem - ram;
  }

  /* plug hole backward */
  pmem = (struct mem *)&ram[mem->prev];
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    ((struct mem *)&ram[mem->next])->prev = (u8_t *)pmem - ram;
  }
}

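/* Worked example (illustration): with three consecutive blocks A, B, C where
 * B was just freed and C is also free, the forward pass above merges B and C
 * into one block; if A is free as well, the backward pass then merges A with
 * the combined block, leaving a single free region with one header. */
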
/**
 * Zero the heap and initialize start, end and lowest-free
 */
void
mem_init(void)
{
  struct mem *mem;

  LWIP_ASSERT("Sanity check alignment",
    (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);

  /* align the heap */
  ram = LWIP_MEM_ALIGN(ram_heap);
  /* initialize the start of the heap */
  mem = (struct mem *)ram;
  mem->next = MEM_SIZE_ALIGNED;
  mem->prev = 0;
  mem->used = 0;
  /* initialize the end of the heap */
  ram_end = (struct mem *)&ram[MEM_SIZE_ALIGNED];
  ram_end->used = 1;
  ram_end->next = MEM_SIZE_ALIGNED;
  ram_end->prev = MEM_SIZE_ALIGNED;

  mem_sem = sys_sem_new(1);

  /* initialize the lowest-free pointer to the start of the heap */
  lfree = (struct mem *)ram;

#if MEM_STATS
  lwip_stats.mem.avail = MEM_SIZE_ALIGNED;
#endif /* MEM_STATS */
}

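/* Note: mem_init() must run before the first mem_malloc()/mem_free(), since
 * it sets up ram, ram_end, lfree and mem_sem. It is normally called once
 * during stack bring-up; if you drive this allocator standalone, call it
 * yourself (sketch below; 'my_startup' is a hypothetical hook). */
#if 0 /* illustrative sketch, not compiled */
static void my_startup(void)
{
  mem_init();  /* heap bookkeeping and mem_sem must exist first */
  /* ... now mem_malloc()/mem_free() are safe to use ... */
}
#endif
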
/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | 2, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);

  /* protect the heap from concurrent access */
  sys_arch_sem_wait(mem_sem, 0);

  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_free: illegal memory\n"));
#if MEM_STATS
    ++lwip_stats.mem.err;
#endif /* MEM_STATS */
    sys_sem_signal(mem_sem);
    return;
  }
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... which has to be in a used state ... */
  LWIP_ASSERT("mem_free: mem->used", mem->used);
  /* ... and is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

#if MEM_STATS
  lwip_stats.mem.used -= mem->next - ((u8_t *)mem - ram);
#endif /* MEM_STATS */

  /* finally, see if prev or next are free also */
  plug_holes(mem);
  sys_sem_signal(mem_sem);
}

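/* Caution (illustration of a failure mode): freeing the same pointer twice
 * is only caught by the LWIP_ASSERT("mem_free: mem->used") above when asserts
 * are compiled in; with asserts disabled, a double free double-subtracts from
 * the MEM_STATS counters and can corrupt the free-list merging done by
 * plug_holes(). */
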
/**
 * Contrary to its name, mem_realloc can only shrink memory, not expand it.
 * Since the only use (for now) is in pbuf_realloc (which also can only shrink),
 * this shouldn't be a problem!
 *
 * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
 * @param newsize required size after shrinking (needs to be smaller than or
 *                equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 */
void *
mem_realloc(void *rmem, mem_size_t newsize)
{
  mem_size_t size;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = LWIP_MEM_ALIGN_SIZE(newsize);

  if(newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }

  if (newsize > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  LWIP_ASSERT("mem_realloc: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_realloc: illegal memory\n"));
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... and its offset pointer */
  ptr = (u8_t *)mem - ram;

  size = mem->next - ptr - SIZEOF_STRUCT_MEM;
  LWIP_ASSERT("mem_realloc can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  sys_arch_sem_wait(mem_sem, 0);

#if MEM_STATS
  lwip_stats.mem.used -= (size - newsize);
#endif /* MEM_STATS */

  mem2 = (struct mem *)&ram[mem->next];
  if(mem2->used == 0) {
    /* The next struct is unused, we can simply move it a little */
    mem_size_t next;
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrunk mem */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    if (lfree == mem2) {
      lfree = (struct mem *)&ram[ptr2];
    }
    mem2 = (struct mem *)&ram[ptr2];
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)&ram[mem2->next])->prev = ptr2;
    }
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    mem2 = (struct mem *)&ram[ptr2];
    if (mem2 < lfree) {
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)&ram[mem2->next])->prev = ptr2;
    }
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
  sys_sem_signal(mem_sem);
  return rmem;
}

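/* Usage sketch (illustrative only; 'example_shrink' and the sizes are
 * hypothetical): since this mem_realloc() can only shrink, the typical
 * pattern is to over-allocate, fill in the actual payload, then trim the
 * block to what was really used. */
#if 0 /* illustrative sketch, not compiled */
static void example_shrink(void)
{
  char *buf = (char *)mem_malloc(512);       /* worst-case size */
  if (buf != NULL) {
    mem_size_t used = 100;                   /* hypothetical actual payload size */
    buf = (char *)mem_realloc(buf, used);    /* always returns the same pointer */
    /* ... use the trimmed buffer ... */
    mem_free(buf);
  }
}
#endif
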
/**
 * Adam's mem_malloc() plus solution for bug #17922
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;

  if (size == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = LWIP_MEM_ALIGN_SIZE(size);

  if(size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }

  if (size > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  /* protect the heap from concurrent access */
  sys_arch_sem_wait(mem_sem, 0);

  /* Scan through the heap searching for a free block that is big enough,
   * beginning with the lowest free block.
   */
  for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE_ALIGNED - size;
       ptr = ((struct mem *)&ram[ptr])->next) {
    mem = (struct mem *)&ram[ptr];

    if ((!mem->used) &&
        (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
      /* mem is not used and at least perfect fit is possible:
       * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

      if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
        /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
         * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
         * -> split large block, create empty remainder,
         * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
         * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
         * struct mem would fit in but no data between mem2 and mem2->next
         * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
         *       region that couldn't hold data, but when mem->next gets freed,
         *       the 2 regions would be combined, resulting in more free memory
         */
        ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
        /* create mem2 struct */
        mem2 = (struct mem *)&ram[ptr2];
        mem2->used = 0;
        mem2->next = mem->next;
        mem2->prev = ptr;
        /* and insert it between mem and mem->next */
        mem->next = ptr2;
        mem->used = 1;

        if (mem2->next != MEM_SIZE_ALIGNED) {
          ((struct mem *)&ram[mem2->next])->prev = ptr2;
        }
#if MEM_STATS
        lwip_stats.mem.used += (size + SIZEOF_STRUCT_MEM);
        if (lwip_stats.mem.max < lwip_stats.mem.used) {
          lwip_stats.mem.max = lwip_stats.mem.used;
        }
#endif /* MEM_STATS */
      } else {
        /* (a mem2 struct does not fit into the user data space of mem, and mem->next will always
         * be used at this point: if not, we would have 2 unused structs in a row and plug_holes
         * would have taken care of this).
         * -> near fit or exact fit: do not split, no mem2 creation
         * also can't move mem->next directly behind mem, since mem->next
         * will always be used at this point!
         */
        mem->used = 1;
#if MEM_STATS
        lwip_stats.mem.used += mem->next - ((u8_t *)mem - ram);
        if (lwip_stats.mem.max < lwip_stats.mem.used) {
          lwip_stats.mem.max = lwip_stats.mem.used;
        }
#endif /* MEM_STATS */
      }

      if (mem == lfree) {
        /* Find next free block after mem and update lowest free pointer */
        while (lfree->used && lfree != ram_end) {
          lfree = (struct mem *)&ram[lfree->next];
        }
        LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
      }
      sys_sem_signal(mem_sem);
      LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
        (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
      LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
        (unsigned long)((u8_t *)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
      LWIP_ASSERT("mem_malloc: sanity check alignment",
        (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);

      return (u8_t *)mem + SIZEOF_STRUCT_MEM;
    }
  }
  LWIP_DEBUGF(MEM_DEBUG | 2, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
#if MEM_STATS
  ++lwip_stats.mem.err;
#endif /* MEM_STATS */
  sys_sem_signal(mem_sem);
  return NULL;
}

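/* Allocation strategy recap (illustration): the scan above is first-fit,
 * starting at 'lfree'. For example, a 100-byte request landing on a 300-byte
 * free block splits it: the first 100 bytes (after the header) are handed
 * out and a new free header is written behind them, provided the remainder
 * can still hold SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED; otherwise the whole
 * block is handed out unsplit. */
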
#endif /* MEM_USE_POOLS */
/**
 * Contiguously allocates enough space for count objects that are size bytes
 * of memory each and returns a pointer to the allocated memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *mem_calloc(mem_size_t count, mem_size_t size)
{
  void *p;

  /* allocate 'count' objects of size 'size' */
  p = mem_malloc(count * size);
  if (p) {
    /* zero the memory */
    memset(p, 0, count * size);
  }
  return p;
}

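/* Note: 'count * size' above can wrap around for large arguments, in which
 * case a too-small block would be zeroed and returned. A guarded variant
 * might look like the sketch below ('mem_calloc_checked' is a hypothetical
 * helper, not part of the lwIP API). */
#if 0 /* illustrative sketch, not compiled */
static void *mem_calloc_checked(mem_size_t count, mem_size_t size)
{
  if (size != 0 && count > (mem_size_t)-1 / size) {
    return NULL;  /* multiplication would overflow mem_size_t */
  }
  return mem_calloc(count, size);
}
#endif
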
#endif /* !MEM_LIBC_MALLOC */