/**
 * @file
 * Dynamic pool memory manager
 *
 * lwIP has dedicated pools for many structures (netconn, protocol control blocks,
 * packet buffers, ...). All these pools are managed here.
 */

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *
 */

#include "lwip/opt.h"

#include "lwip/memp.h"
#include "lwip/pbuf.h"
#include "lwip/udp.h"
#include "lwip/raw.h"
#include "lwip/tcp.h"
#include "lwip/igmp.h"
#include "lwip/api.h"
#include "lwip/api_msg.h"
#include "lwip/tcpip.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "netif/etharp.h"
#include "lwip/ip_frag.h"

#include <string.h>

/** A struct memp overlays the start of each pool element: 'next' links the
 *  free elements of a pool together.  With MEMP_OVERFLOW_CHECK enabled it
 *  also records the file and line of the allocating call, which helps when
 *  an overflow assertion fires. */
struct memp {
  struct memp *next;
#if MEMP_OVERFLOW_CHECK
  const char *file;
  int line;
#endif /* MEMP_OVERFLOW_CHECK */
};

#if MEMP_OVERFLOW_CHECK
/* If MEMP_OVERFLOW_CHECK is turned on, we reserve some bytes at the beginning
 * and at the end of each element, initialize them to 0xcd and check
 * them later. */
/* If MEMP_OVERFLOW_CHECK is >= 2, on every call to memp_malloc or memp_free,
 * every single element in each pool is checked!
 * This is VERY SLOW but also very helpful. */
/* MEMP_SANITY_REGION_BEFORE and MEMP_SANITY_REGION_AFTER can be overridden in
 * lwipopts.h to change the amount reserved for checking. */
#ifndef MEMP_SANITY_REGION_BEFORE
#define MEMP_SANITY_REGION_BEFORE  16
#endif /* MEMP_SANITY_REGION_BEFORE */
#if MEMP_SANITY_REGION_BEFORE > 0
#define MEMP_SANITY_REGION_BEFORE_ALIGNED    LWIP_MEM_ALIGN_SIZE(MEMP_SANITY_REGION_BEFORE)
#else
#define MEMP_SANITY_REGION_BEFORE_ALIGNED    0
#endif /* MEMP_SANITY_REGION_BEFORE */
#ifndef MEMP_SANITY_REGION_AFTER
#define MEMP_SANITY_REGION_AFTER   16
#endif /* MEMP_SANITY_REGION_AFTER */
#if MEMP_SANITY_REGION_AFTER > 0
#define MEMP_SANITY_REGION_AFTER_ALIGNED     LWIP_MEM_ALIGN_SIZE(MEMP_SANITY_REGION_AFTER)
#else
#define MEMP_SANITY_REGION_AFTER_ALIGNED     0
#endif /* MEMP_SANITY_REGION_AFTER */

/* MEMP_SIZE: save space for struct memp and for the before-sanity region */
#define MEMP_SIZE          (LWIP_MEM_ALIGN_SIZE(sizeof(struct memp)) + MEMP_SANITY_REGION_BEFORE_ALIGNED)
#define MEMP_ALIGN_SIZE(x) (LWIP_MEM_ALIGN_SIZE(x) + MEMP_SANITY_REGION_AFTER_ALIGNED)

#else /* MEMP_OVERFLOW_CHECK */

/* No sanity checks.
 * We don't need to preserve the struct memp while not allocated, so we
 * can save a little space and set MEMP_SIZE to 0.
 */
#define MEMP_SIZE          0
#define MEMP_ALIGN_SIZE(x) (LWIP_MEM_ALIGN_SIZE(x))

#endif /* MEMP_OVERFLOW_CHECK */
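
/* With MEMP_OVERFLOW_CHECK enabled, each element in memp_memory is therefore
 * laid out as
 *
 *   | struct memp (aligned) | 0xcd before-region | payload | 0xcd after-region |
 *   |<------------ MEMP_SIZE ------------------>|<--- MEMP_ALIGN_SIZE(size) -->|
 *
 * and memp_malloc() returns the element base plus MEMP_SIZE, i.e. a pointer to
 * the payload.  With the check disabled both guard regions and the persistent
 * header vanish, and struct memp simply overlays the payload of a free element. */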

/** This array holds the first free element of each pool.
 *  Elements form a linked list. */
static struct memp *memp_tab[MEMP_MAX];

/** This array holds the element sizes of each pool. */
#if !MEM_USE_POOLS
static
#endif
const u16_t memp_sizes[MEMP_MAX] = {
#define LWIP_MEMPOOL(name,num,size,desc)  MEMP_ALIGN_SIZE(size),
#include "lwip/memp_std.h"
};

/** This array holds the number of elements in each pool. */
static const u16_t memp_num[MEMP_MAX] = {
#define LWIP_MEMPOOL(name,num,size,desc)  (num),
#include "lwip/memp_std.h"
};
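
/* The pool tables above and below are generated with an "X macro": each time
 * lwip/memp_std.h is included, LWIP_MEMPOOL(name,num,size,desc) has been
 * redefined to emit a different column of the table.  As a sketch, a pool
 * declared in memp_std.h roughly as
 *
 *   LWIP_MEMPOOL(RAW_PCB, MEMP_NUM_RAW_PCB, sizeof(struct raw_pcb), "RAW_PCB")
 *
 * contributes MEMP_ALIGN_SIZE(sizeof(struct raw_pcb)) to memp_sizes, the value
 * of MEMP_NUM_RAW_PCB to memp_num, and "RAW_PCB" to memp_desc; see memp_std.h
 * for the exact entries. */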

/** This array holds a textual description of each pool. */
#ifdef LWIP_DEBUG
static const char *memp_desc[MEMP_MAX] = {
#define LWIP_MEMPOOL(name,num,size,desc)  (desc),
#include "lwip/memp_std.h"
};
#endif /* LWIP_DEBUG */

/** This is the actual memory used by the pools (one contiguous block).  The
 *  extra MEM_ALIGNMENT - 1 bytes give memp_init() room to align the start of
 *  the block at run time. */
static u8_t memp_memory[MEM_ALIGNMENT - 1
#define LWIP_MEMPOOL(name,num,size,desc) + ( (num) * (MEMP_SIZE + MEMP_ALIGN_SIZE(size) ) )
#include "lwip/memp_std.h"
];

#if MEMP_SANITY_CHECK
/**
 * Check that memp-lists don't form a circle
 */
static int
memp_sanity(void)
{
  s16_t i, c;
  struct memp *m, *n;

  for (i = 0; i < MEMP_MAX; i++) {
    for (m = memp_tab[i]; m != NULL; m = m->next) {
      c = 1;
      for (n = memp_tab[i]; n != NULL; n = n->next) {
        if (n == m && --c < 0) {
          return 0;
        }
      }
    }
  }
  return 1;
}
#endif /* MEMP_SANITY_CHECK */
#if MEMP_OVERFLOW_CHECK
/**
 * Check whether a memp element was the victim of an overflow
 * (i.e. whether the restricted regions before and after it have been altered).
 *
 * @param p the memp element to check
 * @param memp_size the element size of the pool p comes from
 */
static void
memp_overflow_check_element(struct memp *p, u16_t memp_size)
{
  u16_t k;
  u8_t *m;
#if MEMP_SANITY_REGION_BEFORE_ALIGNED > 0
  m = (u8_t*)p + MEMP_SIZE - MEMP_SANITY_REGION_BEFORE_ALIGNED;
  for (k = 0; k < MEMP_SANITY_REGION_BEFORE_ALIGNED; k++) {
    if (m[k] != 0xcd) {
      LWIP_ASSERT("detected memp underflow!", 0);
    }
  }
#endif
#if MEMP_SANITY_REGION_AFTER_ALIGNED > 0
  m = (u8_t*)p + MEMP_SIZE + memp_size - MEMP_SANITY_REGION_AFTER_ALIGNED;
  for (k = 0; k < MEMP_SANITY_REGION_AFTER_ALIGNED; k++) {
    if (m[k] != 0xcd) {
      LWIP_ASSERT("detected memp overflow!", 0);
    }
  }
#endif
}

/**
 * Do an overflow check for all elements in every pool.
 *
 * @see memp_overflow_check_element for a description of the check
 */
static void
memp_overflow_check_all(void)
{
  u16_t i, j;
  struct memp *p;

  p = LWIP_MEM_ALIGN(memp_memory);
  for (i = 0; i < MEMP_MAX; ++i) {
    for (j = 0; j < memp_num[i]; ++j) {
      memp_overflow_check_element(p, memp_sizes[i]);
      p = (struct memp*)((u8_t*)p + MEMP_SIZE + memp_sizes[i]);
    }
  }
}

/**
 * Initialize the restricted areas of all memp elements in every pool.
 */
static void
memp_overflow_init(void)
{
  u16_t i, j;
  struct memp *p;
  u8_t *m;

  p = LWIP_MEM_ALIGN(memp_memory);
  for (i = 0; i < MEMP_MAX; ++i) {
    for (j = 0; j < memp_num[i]; ++j) {
#if MEMP_SANITY_REGION_BEFORE_ALIGNED > 0
      m = (u8_t*)p + MEMP_SIZE - MEMP_SANITY_REGION_BEFORE_ALIGNED;
      memset(m, 0xcd, MEMP_SANITY_REGION_BEFORE_ALIGNED);
#endif
#if MEMP_SANITY_REGION_AFTER_ALIGNED > 0
      m = (u8_t*)p + MEMP_SIZE + memp_sizes[i] - MEMP_SANITY_REGION_AFTER_ALIGNED;
      memset(m, 0xcd, MEMP_SANITY_REGION_AFTER_ALIGNED);
#endif
      p = (struct memp*)((u8_t*)p + MEMP_SIZE + memp_sizes[i]);
    }
  }
}
#endif /* MEMP_OVERFLOW_CHECK */
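
/* A sketch of how a port would typically enable the checks above from its
 * lwipopts.h (the option names come from lwip/opt.h; setting the overflow
 * check to 2 or higher additionally re-checks every element of every pool on
 * each memp_malloc() and memp_free() call):
 *
 *   #define MEMP_OVERFLOW_CHECK        1
 *   #define MEMP_SANITY_REGION_BEFORE  16
 *   #define MEMP_SANITY_REGION_AFTER   16
 *   #define MEMP_SANITY_CHECK          1
 */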

/**
 * Initialize this module.
 *
 * Carves memp_memory up into a linked list of free elements for each pool type.
 */
void
memp_init(void)
{
  struct memp *memp;
  u16_t i, j;

#if MEMP_STATS
  for (i = 0; i < MEMP_MAX; ++i) {
    lwip_stats.memp[i].used = lwip_stats.memp[i].max =
      lwip_stats.memp[i].err = 0;
    lwip_stats.memp[i].avail = memp_num[i];
  }
#endif /* MEMP_STATS */

  memp = LWIP_MEM_ALIGN(memp_memory);
  /* for every pool: */
  for (i = 0; i < MEMP_MAX; ++i) {
    memp_tab[i] = NULL;
    /* create a linked list of memp elements */
    for (j = 0; j < memp_num[i]; ++j) {
      memp->next = memp_tab[i];
      memp_tab[i] = memp;
      memp = (struct memp *)((u8_t *)memp + MEMP_SIZE + memp_sizes[i]);
    }
  }
#if MEMP_OVERFLOW_CHECK
  memp_overflow_init();
  /* check everything a first time to see if it worked */
  memp_overflow_check_all();
#endif /* MEMP_OVERFLOW_CHECK */
}
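
/* Typical use of this module, as a sketch (applications normally go through
 * the higher lwIP layers, which call memp_malloc()/memp_free() on their
 * behalf; MEMP_RAW_PCB is one of the pool ids generated from memp_std.h):
 *
 *   memp_init();
 *   struct raw_pcb *pcb = (struct raw_pcb *)memp_malloc(MEMP_RAW_PCB);
 *   if (pcb != NULL) {
 *     ... use pcb ...
 *     memp_free(MEMP_RAW_PCB, pcb);
 *   }
 */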

/**
 * Get an element from a specific pool.
 *
 * @param type the pool to get an element from
 *
 * The debug version has two more parameters:
 * @param file file name of the caller
 * @param line line number of the call site
 *
 * @return a pointer to the allocated memory or a NULL pointer on error
 */
void *
#if !MEMP_OVERFLOW_CHECK
memp_malloc(memp_t type)
#else
memp_malloc_fn(memp_t type, const char* file, const int line)
#endif
{
  struct memp *memp;
  SYS_ARCH_DECL_PROTECT(old_level);

  LWIP_ERROR("memp_malloc: type < MEMP_MAX", (type < MEMP_MAX), return NULL;);

  SYS_ARCH_PROTECT(old_level);
#if MEMP_OVERFLOW_CHECK >= 2
  memp_overflow_check_all();
#endif /* MEMP_OVERFLOW_CHECK >= 2 */

  memp = memp_tab[type];

  if (memp != NULL) {
    memp_tab[type] = memp->next;
#if MEMP_OVERFLOW_CHECK
    memp->next = NULL;
    memp->file = file;
    memp->line = line;
#endif /* MEMP_OVERFLOW_CHECK */
#if MEMP_STATS
    ++lwip_stats.memp[type].used;
    if (lwip_stats.memp[type].used > lwip_stats.memp[type].max) {
      lwip_stats.memp[type].max = lwip_stats.memp[type].used;
    }
#endif /* MEMP_STATS */
    LWIP_ASSERT("memp_malloc: memp properly aligned",
                ((mem_ptr_t)memp % MEM_ALIGNMENT) == 0);
    memp = (struct memp*)((u8_t*)memp + MEMP_SIZE);
  } else {
    LWIP_DEBUGF(MEMP_DEBUG | 2, ("memp_malloc: out of memory in pool %s\n", memp_desc[type]));
#if MEMP_STATS
    ++lwip_stats.memp[type].err;
#endif /* MEMP_STATS */
  }

  SYS_ARCH_UNPROTECT(old_level);

  return memp;
}
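
/* When MEMP_OVERFLOW_CHECK is enabled, callers still write memp_malloc(type):
 * memp.h is expected to map that call onto memp_malloc_fn(), roughly as
 *
 *   #define memp_malloc(t)  memp_malloc_fn((t), __FILE__, __LINE__)
 *
 * so that the allocation site is recorded in the element's file/line fields. */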

/**
 * Put an element back into its pool.
 *
 * @param type the pool to put mem back into
 * @param mem the memp element to free
 */
void
memp_free(memp_t type, void *mem)
{
  struct memp *memp;
  SYS_ARCH_DECL_PROTECT(old_level);

  if (mem == NULL) {
    return;
  }
  LWIP_ASSERT("memp_free: mem properly aligned",
              ((mem_ptr_t)mem % MEM_ALIGNMENT) == 0);

  memp = (struct memp *)((u8_t*)mem - MEMP_SIZE);

  SYS_ARCH_PROTECT(old_level);
#if MEMP_OVERFLOW_CHECK
#if MEMP_OVERFLOW_CHECK >= 2
  memp_overflow_check_all();
#else
  memp_overflow_check_element(memp, memp_sizes[type]);
#endif /* MEMP_OVERFLOW_CHECK >= 2 */
#endif /* MEMP_OVERFLOW_CHECK */

#if MEMP_STATS
  lwip_stats.memp[type].used--;
#endif /* MEMP_STATS */

  memp->next = memp_tab[type];
  memp_tab[type] = memp;

#if MEMP_SANITY_CHECK
  LWIP_ASSERT("memp sanity", memp_sanity());
#endif /* MEMP_SANITY_CHECK */

  SYS_ARCH_UNPROTECT(old_level);
}
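
/* Note that memp_free() trusts its arguments: it does not verify that mem was
 * originally allocated from pool `type', so freeing an element into the wrong
 * pool, or freeing it twice, silently corrupts the free lists unless one of
 * the MEMP_SANITY_CHECK / MEMP_OVERFLOW_CHECK assertions above happens to
 * catch it. */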