/**
 * @file
 * Packet buffer management
 *
 * Packets are built from the pbuf data structure. It supports dynamic
 * memory allocation for packet contents or can reference externally
 * managed packet contents both in RAM and ROM. Quick allocation for
 * incoming packets is provided through pools with fixed sized pbufs.
 *
 * A packet may span over multiple pbufs, chained as a singly linked
 * list. This is called a "pbuf chain".
 *
 * Multiple packets may be queued, also using this singly linked list.
 * This is called a "packet queue".
 *
 * So, a packet queue consists of one or more pbuf chains, each of
 * which consists of one or more pbufs. CURRENTLY, PACKET QUEUES ARE
 * NOT SUPPORTED!!! Use helper structs to queue multiple packets.
 *
 * The difference between a pbuf chain and a packet queue is subtle
 * but precise.
 *
 * The last pbuf of a packet has a ->tot_len field that equals the
 * ->len field. It can be found by traversing the list. If the last
 * pbuf of a packet has a ->next field other than NULL, more packets
 * are on the queue.
 *
 * Therefore, when looping through the pbufs of a single packet, the
 * loop end condition is (tot_len == p->len), NOT (next == NULL).
 */

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *
 */

#include "lwip/opt.h"

#include "lwip/stats.h"
#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/memp.h"
#include "lwip/pbuf.h"
#include "lwip/sys.h"
#include "arch/perf.h"

#include <string.h>

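/* Illustrative sketch (not part of the original file): the header comment
 * above says a single packet's pbufs must be walked with the end condition
 * (q->tot_len == q->len) rather than (q->next == NULL), because ->next may
 * point at a following packet in a queue. A hedged example of that loop; the
 * function name is hypothetical and it is kept under "#if 0" so it is not
 * compiled into the stack. */
#if 0
static u16_t
example_walk_one_packet(struct pbuf *p)
{
  struct pbuf *q;
  u16_t bytes = 0;
  for (q = p; q != NULL; q = q->next) {
    bytes += q->len;
    /* stop at the last pbuf of THIS packet, even if q->next != NULL */
    if (q->tot_len == q->len) {
      break;
    }
  }
  return bytes;
}
#endif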
#define SIZEOF_STRUCT_PBUF        LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf))
/* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically
   aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. */
#define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE)

/**
 * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type).
 *
 * The actual memory allocated for the pbuf is determined by the
 * layer at which the pbuf is allocated and the requested size
 * (from the size parameter).
 *
 * @param layer flag to define header size
 * @param length size of the pbuf's payload
 * @param type this parameter decides how and where the pbuf
 *        should be allocated as follows:
 *
 * - PBUF_RAM: buffer memory for the pbuf is allocated as one large
 *             chunk. This includes protocol headers as well.
 * - PBUF_ROM: no buffer memory is allocated for the pbuf, not even for
 *             protocol headers. Additional headers must be prepended
 *             by allocating another pbuf and chaining it to the front of
 *             the ROM pbuf. It is assumed that the memory used is really
 *             similar to ROM in that it is immutable and will not be
 *             changed. Memory which is dynamic should generally not
 *             be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
 * - PBUF_REF: no buffer memory is allocated for the pbuf, not even for
 *             protocol headers. It is assumed that the pbuf is only
 *             being used in a single thread. If the pbuf gets queued,
 *             then pbuf_take should be called to copy the buffer.
 * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from
 *              the pbuf pool that is allocated during pbuf_init().
 *
 * @return the allocated pbuf. If multiple pbufs were allocated, this
 * is the first pbuf of a pbuf chain.
 */
struct pbuf *
pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type)
{
  struct pbuf *p, *q, *r;
  u16_t offset;
  s32_t rem_len; /* remaining length */
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 3, ("pbuf_alloc(length=%"U16_F")\n", length));

  /* determine header offset */
  offset = 0;
  switch (layer) {
  case PBUF_TRANSPORT:
    /* add room for transport (often TCP) layer header */
    offset += PBUF_TRANSPORT_HLEN;
    /* FALLTHROUGH */
  case PBUF_IP:
    /* add room for IP layer header */
    offset += PBUF_IP_HLEN;
    /* FALLTHROUGH */
  case PBUF_LINK:
    /* add room for link layer header */
    offset += PBUF_LINK_HLEN;
    break;
  case PBUF_RAW:
    break;
  default:
    LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0);
    return NULL;
  }

  switch (type) {
  case PBUF_POOL:
    /* allocate head of pbuf chain into p */
    p = memp_malloc(MEMP_PBUF_POOL);
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 3, ("pbuf_alloc: allocated pbuf %p\n", (void *)p));
    if (p == NULL) {
      return NULL;
    }
    p->type = type;
    p->next = NULL;

    /* make the payload pointer point 'offset' bytes into pbuf data memory */
    p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + (SIZEOF_STRUCT_PBUF + offset)));
    LWIP_ASSERT("pbuf_alloc: pbuf p->payload properly aligned",
                ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
    /* the total length of the pbuf chain is the requested size */
    p->tot_len = length;
    /* set the length of the first pbuf in the chain */
    p->len = LWIP_MIN(length, PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset));
    LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
                ((u8_t*)p->payload + p->len <=
                 (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
    /* set reference count (needed here in case we fail) */
    p->ref = 1;

    /* now allocate the tail of the pbuf chain */

    /* remember first pbuf for linkage in next iteration */
    r = p;
    /* remaining length to be allocated */
    rem_len = length - p->len;
    /* any remaining pbufs to be allocated? */
    while (rem_len > 0) {
      q = memp_malloc(MEMP_PBUF_POOL);
      if (q == NULL) {
        /* free chain so far allocated */
        pbuf_free(p);
        /* bail out unsuccessfully */
        return NULL;
      }
      q->type = type;
      q->flags = 0;
      q->next = NULL;
      /* make previous pbuf point to this pbuf */
      r->next = q;
      /* set total length of this pbuf and next in chain */
      LWIP_ASSERT("rem_len < max_u16_t", rem_len < 0xffff);
      q->tot_len = (u16_t)rem_len;
      /* this pbuf length is pool size, unless smaller sized tail */
      q->len = LWIP_MIN((u16_t)rem_len, PBUF_POOL_BUFSIZE_ALIGNED);
      q->payload = (void *)((u8_t *)q + SIZEOF_STRUCT_PBUF);
      LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned",
                  ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0);
      LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
                  ((u8_t*)p->payload + p->len <=
                   (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
      q->ref = 1;
      /* calculate remaining length to be allocated */
      rem_len -= q->len;
      /* remember this pbuf for linkage in next iteration */
      r = q;
    }
    /* end of chain */
    /*r->next = NULL;*/

    break;
  case PBUF_RAM:
    /* If pbuf is to be allocated in RAM, allocate memory for it. */
    p = (struct pbuf*)mem_malloc(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF + offset) + LWIP_MEM_ALIGN_SIZE(length));
    if (p == NULL) {
      return NULL;
    }
    /* Set up internal structure of the pbuf. */
    p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset));
    p->len = p->tot_len = length;
    p->next = NULL;
    p->type = type;

    LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned",
                ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
    break;
  /* pbuf references existing (non-volatile static constant) ROM payload? */
  case PBUF_ROM:
  /* pbuf references existing (externally allocated) RAM payload? */
  case PBUF_REF:
    /* only allocate memory for the pbuf structure */
    p = memp_malloc(MEMP_PBUF);
    if (p == NULL) {
      LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 2, ("pbuf_alloc: Could not allocate MEMP_PBUF for PBUF_%s.\n",
                  (type == PBUF_ROM) ? "ROM" : "REF"));
      return NULL;
    }
    /* caller must set this field properly, afterwards */
    p->payload = NULL;
    p->len = p->tot_len = length;
    p->next = NULL;
    p->type = type;
    break;
  default:
    LWIP_ASSERT("pbuf_alloc: erroneous type", 0);
    return NULL;
  }
  /* set reference count */
  p->ref = 1;
  /* set flags */
  p->flags = 0;
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 3, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p));
  return p;
}
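/* Illustrative sketch (not part of the original source): a typical way an
 * application might allocate a pbuf with room for protocol headers, fill it,
 * and release it again. The function name and sizes are hypothetical; kept
 * under "#if 0" so it is not compiled into the stack. */
#if 0
static void
example_alloc_and_free(void)
{
  /* reserve room for transport + IP + link headers in front of 128 payload bytes */
  struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, 128, PBUF_RAM);
  if (p != NULL) {
    /* a PBUF_RAM pbuf is a single contiguous buffer, so p->len == p->tot_len */
    memset(p->payload, 0, p->len);
    /* drop our reference; this frees the pbuf since ref == 1 */
    pbuf_free(p);
  }
}
#endif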


/**
 * Shrink a pbuf chain to a desired length.
 *
 * @param p pbuf to shrink.
 * @param new_len desired new length of pbuf chain
 *
 * Depending on the desired length, the first few pbufs in a chain might
 * be skipped and left unchanged. The new last pbuf in the chain will be
 * resized, and any remaining pbufs will be freed.
 *
 * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted.
 * @note May not be called on a packet queue.
 *
 * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain).
 */
void
pbuf_realloc(struct pbuf *p, u16_t new_len)
{
  struct pbuf *q;
  u16_t rem_len; /* remaining length */
  s32_t grow;

  LWIP_ASSERT("pbuf_realloc: sane p->type", p->type == PBUF_POOL ||
              p->type == PBUF_ROM ||
              p->type == PBUF_RAM ||
              p->type == PBUF_REF);

  /* desired length larger than current length? */
  if (new_len >= p->tot_len) {
    /* enlarging not yet supported */
    return;
  }

  /* the pbuf chain grows by (new_len - p->tot_len) bytes
   * (which may be negative in case of shrinking) */
  grow = new_len - p->tot_len;

  /* first, step over any pbufs that should remain in the chain */
  rem_len = new_len;
  q = p;
  /* should this pbuf be kept? */
  while (rem_len > q->len) {
    /* decrease remaining length by pbuf length */
    rem_len -= q->len;
    /* decrease total length indicator */
    LWIP_ASSERT("grow < max_u16_t", grow < 0xffff);
    q->tot_len += (u16_t)grow;
    /* proceed to next pbuf in chain */
    q = q->next;
  }
  /* we have now reached the new last pbuf (in q) */
  /* rem_len == desired length for pbuf q */

  /* shrink allocated memory for PBUF_RAM */
  /* (other types merely adjust their length fields) */
  if ((q->type == PBUF_RAM) && (rem_len != q->len)) {
    /* reallocate and adjust the length of the pbuf that will be split */
    q = mem_realloc(q, (u8_t *)q->payload - (u8_t *)q + rem_len);
    LWIP_ASSERT("mem_realloc gave q == NULL", q != NULL);
  }
  /* adjust length fields for new last pbuf */
  q->len = rem_len;
  q->tot_len = q->len;

  /* any remaining pbufs in chain? */
  if (q->next != NULL) {
    /* free remaining pbufs in chain */
    pbuf_free(q->next);
  }
  /* q is the last pbuf in the chain */
  q->next = NULL;

}
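/* Illustrative sketch (not part of the original source): shrinking an
 * over-allocated pbuf chain down to the number of bytes actually received, a
 * common pattern in receive paths. The function name and sizes are
 * hypothetical; kept under "#if 0" so it is not compiled. */
#if 0
static struct pbuf *
example_trim_rx(u16_t actual_len)
{
  /* allocate a worst-case sized chain from the pool ... */
  struct pbuf *p = pbuf_alloc(PBUF_RAW, 1514, PBUF_POOL);
  if ((p != NULL) && (actual_len < p->tot_len)) {
    /* ... then shrink it; trailing pool pbufs are freed automatically */
    pbuf_realloc(p, actual_len);
  }
  return p;
}
#endif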

/**
 * Adjusts the payload pointer to hide or reveal headers in the payload.
 *
 * Adjusts the ->payload pointer so that space for a header
 * (dis)appears in the pbuf payload.
 *
 * The ->payload, ->tot_len and ->len fields are adjusted.
 *
 * @param p pbuf to change the header size.
 * @param header_size_increment Number of bytes to increment header size which
 * increases the size of the pbuf. New space is on the front.
 * (Using a negative value decreases the header size.)
 * If header_size_increment is 0, this function does nothing and returns successfully.
 *
 * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so
 * the call will fail. A check is made that the increase in header size does
 * not move the payload pointer in front of the start of the buffer.
 * @return non-zero on failure, zero on success.
 *
 */
u8_t
pbuf_header(struct pbuf *p, s16_t header_size_increment)
{
  u16_t type;
  void *payload;
  u16_t increment_magnitude;

  LWIP_ASSERT("p != NULL", p != NULL);
  if ((header_size_increment == 0) || (p == NULL))
    return 0;

  if (header_size_increment < 0){
    increment_magnitude = -header_size_increment;
    /* Check that we aren't going to move off the end of the pbuf */
    LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;);
  } else {
    increment_magnitude = header_size_increment;
#if 0
    /* Can't assert these as some callers speculatively call
       pbuf_header() to see if it's OK. Will return 1 below instead. */
    /* Check that we've got the correct type of pbuf to work with */
    LWIP_ASSERT("p->type == PBUF_RAM || p->type == PBUF_POOL",
                p->type == PBUF_RAM || p->type == PBUF_POOL);
    /* Check that we aren't going to move off the beginning of the pbuf */
    LWIP_ASSERT("p->payload - increment_magnitude >= p + SIZEOF_STRUCT_PBUF",
                (u8_t *)p->payload - increment_magnitude >= (u8_t *)p + SIZEOF_STRUCT_PBUF);
#endif
  }

  type = p->type;
  /* remember current payload pointer */
  payload = p->payload;

  /* pbuf types containing payloads? */
  if (type == PBUF_RAM || type == PBUF_POOL) {
    /* set new payload pointer */
    p->payload = (u8_t *)p->payload - header_size_increment;
    /* boundary check fails? */
    if ((u8_t *)p->payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) {
      LWIP_DEBUGF( PBUF_DEBUG | 2, ("pbuf_header: failed as %p < %p (not enough space for new header size)\n",
                   (void *)p->payload,
                   (void *)(p + 1)));
      /* restore old payload pointer */
      p->payload = payload;
      /* bail out unsuccessfully */
      return 1;
    }
  /* pbuf types referring to external payloads? */
  } else if (type == PBUF_REF || type == PBUF_ROM) {
    /* hide a header in the payload? */
    if ((header_size_increment < 0) && (increment_magnitude <= p->len)) {
      /* increase payload pointer */
      p->payload = (u8_t *)p->payload - header_size_increment;
    } else {
      /* cannot expand payload to front (yet!)
       * bail out unsuccessfully */
      return 1;
    }
  }
  else {
    /* Unknown type */
    LWIP_ASSERT("bad pbuf type", 0);
    return 1;
  }
  /* modify pbuf length fields */
  p->len += header_size_increment;
  p->tot_len += header_size_increment;

  LWIP_DEBUGF(PBUF_DEBUG, ("pbuf_header: old %p new %p (%"S16_F")\n",
              (void *)payload, (void *)p->payload, header_size_increment));

  return 0;
}
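/* Illustrative sketch (not part of the original source): using pbuf_header()
 * to reveal and later hide a (hypothetical) 8-byte protocol header in front
 * of the payload, checking the return value as required above. The function
 * name is hypothetical; kept under "#if 0" so it is not compiled. */
#if 0
static u8_t
example_prepend_header(struct pbuf *p)
{
  /* grow the payload towards the front to make room for an 8-byte header */
  if (pbuf_header(p, 8) != 0) {
    return 1; /* not enough hidden space in front of p->payload */
  }
  memset(p->payload, 0, 8);  /* fill in the (hypothetical) header */
  /* ... hand the packet to a lower layer, then hide the header again ... */
  pbuf_header(p, -8);
  return 0;
}
#endif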

/**
 * Dereference a pbuf chain or queue and deallocate any no-longer-used
 * pbufs at the head of this chain or queue.
 *
 * Decrements the pbuf reference count. If it reaches zero, the pbuf is
 * deallocated.
 *
 * For a pbuf chain, this is repeated for each pbuf in the chain,
 * up to the first pbuf which has a non-zero reference count after
 * decrementing. So, when all reference counts are one, the whole
 * chain is free'd.
 *
 * @param p The pbuf (chain) to be dereferenced.
 *
 * @return the number of pbufs that were de-allocated
 * from the head of the chain.
 *
 * @note MUST NOT be called on a packet queue (Not verified to work yet).
 * @note the reference counter of a pbuf equals the number of pointers
 * that refer to the pbuf (or into the pbuf).
 *
 * @internal examples:
 *
 * Assuming existing chains a->b->c with the following reference
 * counts, calling pbuf_free(a) results in:
 *
 * 1->2->3 becomes ...1->3
 * 3->3->3 becomes 2->3->3
 * 1->1->2 becomes ......1
 * 2->1->1 becomes 1->1->1
 * 1->1->1 becomes .......
 *
 */
u8_t
pbuf_free(struct pbuf *p)
{
  u16_t type;
  struct pbuf *q;
  u8_t count;

  if (p == NULL) {
    LWIP_ASSERT("p != NULL", p != NULL);
    /* if assertions are disabled, proceed with debug output */
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 2, ("pbuf_free(p == NULL) was called.\n"));
    return 0;
  }
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 3, ("pbuf_free(%p)\n", (void *)p));

  PERF_START;

  LWIP_ASSERT("pbuf_free: sane type",
    p->type == PBUF_RAM || p->type == PBUF_ROM ||
    p->type == PBUF_REF || p->type == PBUF_POOL);

  count = 0;
  /* de-allocate all consecutive pbufs from the head of the chain that
   * reach a zero reference count after decrementing */
  while (p != NULL) {
    u16_t ref;
    SYS_ARCH_DECL_PROTECT(old_level);
    /* Since decrementing ref cannot be guaranteed to be a single machine operation
     * we must protect it. We put the new ref into a local variable to prevent
     * further protection. */
    SYS_ARCH_PROTECT(old_level);
    /* all pbufs in a chain are referenced at least once */
    LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0);
    /* decrease reference count (number of pointers to pbuf) */
    ref = --(p->ref);
    SYS_ARCH_UNPROTECT(old_level);
    /* is this pbuf no longer referenced? */
    if (ref == 0) {
      /* remember next pbuf in chain for next iteration */
      q = p->next;
      LWIP_DEBUGF( PBUF_DEBUG | 2, ("pbuf_free: deallocating %p\n", (void *)p));
      type = p->type;
      /* is this a pbuf from the pool? */
      if (type == PBUF_POOL) {
        memp_free(MEMP_PBUF_POOL, p);
      /* is this a ROM or RAM referencing pbuf? */
      } else if (type == PBUF_ROM || type == PBUF_REF) {
        memp_free(MEMP_PBUF, p);
      /* type == PBUF_RAM */
      } else {
        mem_free(p);
      }
      count++;
      /* proceed to next pbuf */
      p = q;
    /* p->ref > 0, this pbuf is still referenced */
    /* (and so are the remaining pbufs in the chain) */
    } else {
      LWIP_DEBUGF( PBUF_DEBUG | 2, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, ref));
      /* stop walking through the chain */
      p = NULL;
    }
  }
  PERF_STOP("pbuf_free");
  /* return number of de-allocated pbufs */
  return count;
}
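/* Illustrative sketch (not part of the original source): the reference-count
 * behaviour described above. Holding an extra reference keeps a pbuf alive
 * across one pbuf_free() call, so each "owner" must free it once. The
 * function name is hypothetical; kept under "#if 0" so it is not compiled. */
#if 0
static void
example_ref_then_free(struct pbuf *p)
{
  pbuf_ref(p);       /* ref: 1 -> 2, e.g. because we also queue p somewhere */
  pbuf_free(p);      /* ref: 2 -> 1, nothing is deallocated yet */
  pbuf_free(p);      /* ref: 1 -> 0, the pbuf (chain head) is deallocated now */
}
#endif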

/**
 * Count number of pbufs in a chain
 *
 * @param p first pbuf of chain
 * @return the number of pbufs in a chain
 */

u8_t
pbuf_clen(struct pbuf *p)
{
  u8_t len;

  len = 0;
  while (p != NULL) {
    ++len;
    p = p->next;
  }
  return len;
}

/**
 * Increment the reference count of the pbuf.
 *
 * @param p pbuf to increase reference counter of
 *
 */
void
pbuf_ref(struct pbuf *p)
{
  SYS_ARCH_DECL_PROTECT(old_level);
  /* pbuf given? */
  if (p != NULL) {
    SYS_ARCH_PROTECT(old_level);
    ++(p->ref);
    SYS_ARCH_UNPROTECT(old_level);
  }
}

/**
 * Concatenate two pbufs (each may be a pbuf chain) and take over
 * the caller's reference of the tail pbuf.
 *
 * @note The caller MAY NOT reference the tail pbuf afterwards.
 * Use pbuf_chain() for that purpose.
 *
 * @see pbuf_chain()
 */

void
pbuf_cat(struct pbuf *h, struct pbuf *t)
{
  struct pbuf *p;

  LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)",
             ((h != NULL) && (t != NULL)), return;);

  /* proceed to last pbuf of chain */
  for (p = h; p->next != NULL; p = p->next) {
    /* add total length of second chain to all totals of first chain */
    p->tot_len += t->tot_len;
  }
  /* { p is last pbuf of first h chain, p->next == NULL } */
  LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len);
  LWIP_ASSERT("p->next == NULL", p->next == NULL);
  /* add total length of second chain to last pbuf total of first chain */
  p->tot_len += t->tot_len;
  /* chain last pbuf of head (p) with first of tail (t) */
  p->next = t;
  /* p->next now references t, but the caller will drop its reference to t,
   * so the net reference count of t is unchanged.
   */
}

/**
 * Chain two pbufs (or pbuf chains) together.
 *
 * The caller MUST call pbuf_free(t) once it has stopped
 * using it. Use pbuf_cat() instead if you no longer use t.
 *
 * @param h head pbuf (chain)
 * @param t tail pbuf (chain)
 * @note The pbufs MUST belong to the same packet.
 * @note MAY NOT be called on a packet queue.
 *
 * The ->tot_len fields of all pbufs of the head chain are adjusted.
 * The ->next field of the last pbuf of the head chain is adjusted.
 * The ->ref field of the first pbuf of the tail chain is adjusted.
 *
 */
void
pbuf_chain(struct pbuf *h, struct pbuf *t)
{
  pbuf_cat(h, t);
  /* t is now referenced by h */
  pbuf_ref(t);
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_FRESH | 2, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t));
}
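/* Illustrative sketch (not part of the original source): the difference in
 * reference ownership between pbuf_cat() and pbuf_chain(), as described in
 * the two doc comments above. The function name is hypothetical; kept under
 * "#if 0" so it is not compiled. */
#if 0
static void
example_cat_vs_chain(struct pbuf *h, struct pbuf *t)
{
  /* variant 1: hand our reference of t over to h */
  pbuf_cat(h, t);
  /* t must NOT be touched by us afterwards; freeing h releases it */

  /* variant 2 (shown only as comments, mutually exclusive with variant 1):  */
  /*   pbuf_chain(h, t);    -- takes an extra reference on t                 */
  /*   ... keep using t ...                                                  */
  /*   pbuf_free(t);        -- drop our own reference when done              */

  pbuf_free(h);
}
#endif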

/**
 * Dechains the first pbuf from its succeeding pbufs in the chain.
 *
 * Makes p->tot_len field equal to p->len.
 * @param p pbuf to dechain
 * @return remainder of the pbuf chain, or NULL if it was de-allocated.
 * @note May not be called on a packet queue.
 */
struct pbuf *
pbuf_dechain(struct pbuf *p)
{
  struct pbuf *q;
  u8_t tail_gone = 1;
  /* tail */
  q = p->next;
  /* pbuf has successor in chain? */
  if (q != NULL) {
    /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0)) */
    LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len);
    /* enforce invariant if assertion is disabled */
    q->tot_len = p->tot_len - p->len;
    /* decouple pbuf from remainder */
    p->next = NULL;
    /* total length of pbuf p is its own length only */
    p->tot_len = p->len;
    /* q is no longer referenced by p, free it */
    LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_STATE, ("pbuf_dechain: unreferencing %p\n", (void *)q));
    tail_gone = pbuf_free(q);
    if (tail_gone > 0) {
      LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_STATE,
                  ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q));
    }
    /* return remaining tail or NULL if deallocated */
  }
  /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0)) */
  LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len);
  return ((tail_gone > 0) ? NULL : q);
}
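/* Illustrative sketch (not part of the original source): splitting the head
 * pbuf off a chain with pbuf_dechain(). Because pbuf_dechain() drops the
 * head's reference on the remainder, the caller takes its own reference
 * first if it wants to keep using the tail. The function name is
 * hypothetical; kept under "#if 0" so it is not compiled. */
#if 0
static void
example_dechain(struct pbuf *p)
{
  struct pbuf *rest;
  /* keep our own reference on the tail so it survives the dechain */
  if (p->next != NULL) {
    pbuf_ref(p->next);
  }
  rest = pbuf_dechain(p);
  /* p now stands alone: p->tot_len == p->len */
  pbuf_free(p);
  if (rest != NULL) {
    /* ... process the remaining chain, then drop our reference ... */
    pbuf_free(rest);
  }
}
#endif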

/**
 *
 * Create PBUF_RAM copies of pbufs.
 *
 * Used to queue packets on behalf of the lwIP stack, such as
 * ARP based queueing.
 *
 * @note You MUST explicitly use p = pbuf_take(p);
 *
 * @note Only one packet is copied, no packet queue!
 *
 * @param p_to pbuf destination of the copy
 * @param p_from pbuf source of the copy
 *
 * @return ERR_OK if pbuf was copied
 *         ERR_ARG if one of the pbufs is NULL or p_to is not big
 *                 enough to hold p_from
 */
err_t
pbuf_copy(struct pbuf *p_to, struct pbuf *p_from)
{
  u16_t offset_to=0, offset_from=0, len;

  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 3, ("pbuf_copy(%p, %p)\n",
              (void*)p_to, (void*)p_from));

  /* is the target big enough to hold the source? */
  LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) &&
             (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;);

  /* iterate through pbuf chain */
  do
  {
    LWIP_ASSERT("p_to != NULL", p_to != NULL);
    /* copy one part of the original chain */
    if ((p_to->len - offset_to) >= (p_from->len - offset_from)) {
      /* complete current p_from fits into current p_to */
      len = p_from->len - offset_from;
    } else {
      /* current p_from does not fit into current p_to */
      len = p_to->len - offset_to;
    }
    MEMCPY((u8_t*)p_to->payload + offset_to, (u8_t*)p_from->payload + offset_from, len);
    offset_to += len;
    offset_from += len;
    LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len);
    if (offset_to == p_to->len) {
      /* on to next p_to (if any) */
      offset_to = 0;
      p_to = p_to->next;
    }
    LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len);
    if (offset_from >= p_from->len) {
      /* on to next p_from (if any) */
      offset_from = 0;
      p_from = p_from->next;
    }

    if((p_from != NULL) && (p_from->len == p_from->tot_len)) {
      /* don't copy more than one packet! */
      LWIP_ERROR("pbuf_copy() does not allow packet queues!\n",
                 (p_from->next == NULL), return ERR_VAL;);
    }
    if((p_to != NULL) && (p_to->len == p_to->tot_len)) {
      /* don't copy more than one packet! */
      LWIP_ERROR("pbuf_copy() does not allow packet queues!\n",
                 (p_to->next == NULL), return ERR_VAL;);
    }
  } while (p_from);
  LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE | 1, ("pbuf_copy: end of chain reached.\n"));
  return ERR_OK;
}
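/* Illustrative sketch (not part of the original source): making a private,
 * contiguous PBUF_RAM copy of a (possibly chained) packet, e.g. before
 * queueing it while the original buffers are reused by a driver. The function
 * name is hypothetical; kept under "#if 0" so it is not compiled. */
#if 0
static struct pbuf *
example_clone_packet(struct pbuf *p)
{
  struct pbuf *copy = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
  if (copy != NULL) {
    /* copy->tot_len >= p->tot_len, so pbuf_copy() can succeed */
    if (pbuf_copy(copy, p) != ERR_OK) {
      pbuf_free(copy);
      copy = NULL;
    }
  }
  return copy;
}
#endif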

/**
 * Copy (part of) the contents of a packet buffer
 * to an application supplied buffer.
 *
 * @param buf the pbuf from which to copy data
 * @param dataptr the application supplied buffer
 * @param len length of data to copy (dataptr must be big enough)
 * @param offset offset into the packet buffer from where to begin copying len bytes
 * @return the number of bytes copied, or 0 on failure
 */
u16_t
pbuf_copy_partial(struct pbuf *buf, void *dataptr, u16_t len, u16_t offset)
{
  struct pbuf *p;
  u16_t left;
  u16_t buf_copy_len;
  u16_t copied_total = 0;

  LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;);
  LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;);

  left = 0;

  if((buf == NULL) || (dataptr == NULL)) {
    return 0;
  }

  /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
  for(p = buf; len != 0 && p != NULL; p = p->next) {
    if ((offset != 0) && (offset >= p->len)) {
      /* don't copy from this buffer -> on to the next */
      offset -= p->len;
    } else {
      /* copy from this buffer. maybe only partially. */
      buf_copy_len = p->len - offset;
      if (buf_copy_len > len)
        buf_copy_len = len;
      /* copy the necessary parts of the buffer */
      MEMCPY(&((char*)dataptr)[left], &((char*)p->payload)[offset], buf_copy_len);
      copied_total += buf_copy_len;
      left += buf_copy_len;
      len -= buf_copy_len;
      offset = 0;
    }
  }
  return copied_total;
}
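/* Illustrative sketch (not part of the original source): flattening the first
 * bytes of a (possibly chained) packet into a caller-supplied buffer, e.g. to
 * inspect a protocol header without caring about pbuf boundaries. The buffer
 * and function name are hypothetical; kept under "#if 0" so it is not
 * compiled. */
#if 0
static u16_t
example_peek_header(struct pbuf *p, u8_t *hdr, u16_t hdr_len)
{
  /* copy up to hdr_len bytes starting at offset 0 of the packet */
  return pbuf_copy_partial(p, hdr, hdr_len, 0);
}
#endif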