reassembler_packet.c

/*
 * $Id: reassembler_packet.c 346460 2009-11-14 05:06:47Z ssiano $
 *
 * This code is provided as is by Juniper Networks SDK Developer Support.
 * It is provided with no warranties or guarantees, and Juniper Networks
 * will not provide support or maintenance of this code in any fashion.
 * The code is provided only to help a developer better understand how
 * the SDK can be used.
 *
 * Copyright (c) 2008, Juniper Networks, Inc.
 * All rights reserved.
 */

#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <math.h>
#include <sys/limits.h>
#include <isc/eventlib.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <jnx/aux_types.h>
#include <jnx/bits.h>
#include <jnx/mpsdk.h>
#include <jnx/msp_objcache.h>
#include <jnx/atomic.h>
#include <jnx/msp_locks.h>
#include <sys/jnx/jbuf.h>
#include "reassembler_logging.h"
#include "reassembler_packet.h"


/*** Constants ***/

#define SHARED_MEM_NAME "reassembler arena"

#define HASHTABLE_NAME "reassembler hash table"

#define TABLE_ENTRY_NAME "reassembler table entry"

#define FRAGMENT_ENTRY_NAME "reassembler fragment entry"

#define ENTRY_AGE_CHECK_INTERVAL 5

#define MAX_MSP_SEND_RETRIES 100

#define MIN_FIFO_DEPTH 1023

#define BYTES_PER_FRAG_BLOCK 8

#define WORDS_PER_FRAG_BLOCK 2


#define FLOW_BUCKET_COUNT (1024 * 64)

const uint32_t HASH_MASK = FLOW_BUCKET_COUNT - 1;
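
/*
 * Since FLOW_BUCKET_COUNT is a power of two, masking with HASH_MASK is
 * equivalent to reducing a hash value modulo the bucket count: with 64K
 * buckets, hash & 0xFFFF selects a bucket without any division.
 */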

/*** Data Structures ***/

typedef struct fragment_hole_s {
    uint16_t start;
    uint16_t end;
} fragment_hole_t;
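
/*
 * Holes are measured in 8-byte fragment blocks, the unit of the IP
 * header's fragment-offset field (RFC 791); e.g. a hole spanning payload
 * bytes 1480-2959 is stored as start = 185, end = 370. This mirrors the
 * hole-descriptor bookkeeping of RFC 815, kept per fragment here.
 */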

typedef struct fragment_entry_s {
    fragment_hole_t   leading_hole;
    struct jbuf *     jb;
    fragment_hole_t   trailing_hole;

    bool is_leading_hole;
    bool is_trailing_hole;

    // for list at this hash bucket:
    TAILQ_ENTRY(fragment_entry_s) entries;
} fragment_entry_t;


typedef TAILQ_HEAD(fragment_list_s, fragment_entry_s) fragment_list_t;


typedef struct table_entry_s {
    msp_spinlock_t               lock;
    time_t                       age_ts;

    in_addr_t                    saddr;
    in_addr_t                    daddr;
    uint16_t                     frag_group;
    uint8_t                      protocol;

    uint8_t                      free;
    uint16_t                     total_len;
    fragment_list_t              flist;

    // for list at this hash bucket:
    TAILQ_ENTRY(table_entry_s)    entries;
} table_entry_t;


typedef TAILQ_HEAD(ht_bucket_list_s, table_entry_s) ht_bucket_list_t;


typedef struct hash_bucket_s {
    msp_spinlock_t        bucket_lock;
    ht_bucket_list_t      bucket_entries;
} hash_bucket_t;


typedef struct hashtable_s {
    hash_bucket_t hash_bucket[FLOW_BUCKET_COUNT];
} hashtable_t;
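
/*
 * Note the footprint: with 64K buckets this structure occupies
 * sizeof(hash_bucket_t) * 65536 bytes. It is allocated from the shared
 * memory arena so that every data CPU's packet loop sees the same flow
 * state under the per-bucket locks.
 */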

extern uint16_t         reassembler_mtu;

static evTimerID        aging_timer;
static msp_shm_handle_t shm_handle;
static msp_oc_handle_t  table_handle;
static msp_oc_handle_t  entry_handle;
static msp_oc_handle_t  frag_handle;
static hashtable_t *    flows_table;
static atomic_uint_t    loops_running;
static volatile uint8_t do_shutdown;
static uint32_t         obj_cache_id;


/*** STATIC/INTERNAL Functions ***/

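/*
 * aging_cleanup() is the periodic ager, run every ENTRY_AGE_CHECK_INTERVAL
 * seconds on the (real-time) control thread. It walks all buckets and
 * discards entries that the data path has flagged free, as well as flows
 * that have seen no new fragment within flow_duration seconds, then lets
 * the object-cache allocator return unused memory to the shared arena.
 */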
static void
aging_cleanup(evContext ctx UNUSED,
              void * uap UNUSED,
              struct timespec due UNUSED,
              struct timespec inter UNUSED)
{
    const time_t flow_duration = 30;

    uint32_t i, cpu;
    hash_bucket_t * bucket;
    table_entry_t * entry, * next;
    fragment_entry_t * fe;
    time_t current_time, entry_timeout;
    struct timeval curtime;

    cpu = msp_get_current_cpu();

    if(!lw_getsystimes(&curtime, NULL)) {
        current_time = curtime.tv_sec;
    } else {
        LOG(LOG_ERR, "%s: Cannot get a timestamp", __func__);
        return;
    }

    entry_timeout = current_time - flow_duration;

    for(i = 0; i < FLOW_BUCKET_COUNT; ++i) {

        bucket = &flows_table->hash_bucket[i];

        // Get the bucket lock
        INSIST_ERR(msp_spinlock_lock(&bucket->bucket_lock) == MSP_OK);

        entry = TAILQ_FIRST(&bucket->bucket_entries);

        while(entry != NULL) {

            // keep next to safely remove from list
            next = TAILQ_NEXT(entry, entries);

            // Get the entry lock
            INSIST_ERR(msp_spinlock_lock(&entry->lock) == MSP_OK);

            // check for timeout/expiry or free flag
            if(entry->free || entry->age_ts < entry_timeout) {
                // release any fragments still queued on an expired,
                // incomplete flow so their buffers are not leaked
                while((fe = TAILQ_FIRST(&entry->flist)) != NULL) {
                    TAILQ_REMOVE(&entry->flist, fe, entries);
                    jbuf_free(fe->jb);
                    msp_objcache_free(frag_handle, fe, cpu, obj_cache_id);
                }
                TAILQ_REMOVE(&bucket->bucket_entries, entry, entries);
                msp_objcache_free(entry_handle, entry, cpu, obj_cache_id);
            } else {
                // Release the entry lock
                INSIST_ERR(msp_spinlock_unlock(&entry->lock) == MSP_OK);
            }
            entry = next;
        }

        // Release the bucket lock
        INSIST_ERR(msp_spinlock_unlock(&bucket->bucket_lock) == MSP_OK);
    }

    msp_objcache_reclaim(shm_handle);
}

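/*
 * checksum_adjust() applies the incremental (one's-complement) checksum
 * update technique of RFC 1624; this routine follows the well-known sample
 * code in RFC 3022, section 4.2. Given pointers to the old and new field
 * values, it folds the old value out of the checksum and the new value in,
 * so e.g. rewriting a 2-byte ip_len never requires summing the whole header.
 */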
static void
checksum_adjust(
    unsigned char * chksum,
    unsigned char * optr,
    int olen,
    unsigned char * nptr,
    int nlen)
{
    long x, old, new_;

    x = chksum[0] * 256 + chksum[1];
    x = ~x & 0xFFFF;

    while (olen) {
        old = optr[0] * 256 + optr[1];
        optr += 2;
        x -= old & 0xffff;
        if (x <= 0) {
            x--;
            x &= 0xffff;
        }
        olen -= 2;
    }

    while (nlen) {
        new_ = nptr[0] * 256 + nptr[1];
        nptr += 2;
        x += new_ & 0xffff;
        if (x & 0x10000) {
            x++;
            x &= 0xffff;
        }
        nlen -= 2;
    }

    x = ~x & 0xFFFF;
    chksum[0] = x / 256;
    chksum[1] = x & 0xff;
}

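/*
 * send_packet() enqueues a finished packet on the data handle's tx-FIFO,
 * retrying up to MAX_MSP_SEND_RETRIES times while msp_data_send() reports
 * MSP_DATA_SEND_RETRY (FIFO full). On any terminal error the jbuf is freed
 * here, so the caller must not reuse the buffer after this call.
 */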
static void
send_packet(struct jbuf * pkt_buf,
            const msp_data_handle_t * const handle)
{
    // enqueue it back into the FIFO to go out

    int rc = MSP_DATA_SEND_RETRY;
    int retries = 0;

    while(rc == MSP_DATA_SEND_RETRY && ++retries <= MAX_MSP_SEND_RETRIES) {
        rc = msp_data_send(*handle, pkt_buf, MSP_MSG_TYPE_PACKET);
    }

    if(rc == MSP_DATA_SEND_FAIL) {

        DLOG(LOG_ERR, "%s: Failed to forward packet using msp_data_send().",
            __func__);
        jbuf_free(pkt_buf);

    } else if(rc == MSP_DATA_SEND_RETRY) {

        DLOG(LOG_ERR, "%s: Failed to send a jbuf after %d retries "
            "with msp_data_send().", __func__, MAX_MSP_SEND_RETRIES);
        jbuf_free(pkt_buf);

    } else if(rc != MSP_OK) {

        DLOG(LOG_ERR, "%s: Failed to forward packet and got unknown return "
            "code from msp_data_send().", __func__);
        jbuf_free(pkt_buf);
    }
}

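/*
 * pullup_bytes() ensures the first num_bytes of the packet are contiguous
 * in the first buffer of the chain so headers can be read through a plain
 * struct pointer, in the spirit of the BSD m_pullup(). Note the assumption
 * that, like m_pullup(), jbuf_pullup() consumes the chain on failure, so
 * callers only free the (possibly new) chain handed back on success.
 */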
static status_t
pullup_bytes(struct jbuf ** pkt_buf, uint16_t num_bytes)
{
    struct jbuf * tmp_buf;

    if((*pkt_buf)->jb_len < num_bytes) {
        tmp_buf = jbuf_pullup((*pkt_buf), num_bytes);

        if(!tmp_buf) { // check it didn't fail
            return EFAIL;
        }

        *pkt_buf = tmp_buf;
    }
    return SUCCESS;
}

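/*
 * send_fragment_list() drains a fully reassembled fragment list and
 * re-fragments it to reassembler_mtu on the way out: buffers are glued
 * together until a fragment of exactly the target size can be emitted,
 * a source buffer is split when it straddles the boundary, and a copy of
 * the IP header is carried forward for each subsequent fragment. Length,
 * offset, and the MF bit are patched per fragment with incremental
 * checksum adjustment. For example, with reassembler_mtu = 1500 and a
 * 20-byte header, each full fragment carries 1480 payload bytes and
 * successive offsets step by 1480 / 8 = 185 blocks.
 */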
static void
send_fragment_list(table_entry_t * entry,
                   const int cpu,
                   const msp_data_handle_t * const handle)
{
    struct jbuf * pkt_buf, * ip_hdr, * tmp;
    fragment_entry_t * fe;
    struct ip * ip_pkt;
    uint16_t len = 0;
    uint16_t orig_ip_offset = htons(IP_MF);
    uint16_t new_len;
    uint16_t cur_frag_off = 0;
    uint16_t offset_increment;
    uint16_t mtu; // mtu incl. IP hdr + IP payload

    pkt_buf = jbuf_get();

    fe = TAILQ_FIRST(&entry->flist);
    if(!fe) {
        DLOG(LOG_ERR, "%s: found nothing in list", __func__);
        jbuf_free(pkt_buf); // don't leak the buffer we just allocated
        return;
    }
    TAILQ_REMOVE(&entry->flist, fe, entries);

    ip_hdr = fe->jb;
    if(pullup_bytes(&ip_hdr, sizeof(struct ip))) {
        DLOG(LOG_ERR, "%s: Not enough bytes to form an IP header "
                "because a pullup failed (0).", __func__);
        goto failure;
    }
    fe->jb = ip_hdr; // the pullup may have returned a different jbuf
    ip_pkt = jbuf_to_d(ip_hdr, struct ip *); // header of the first fragment

    // fix up the length to always be reassembler_mtu
    // max payload len in a fragment has to be divisible by BYTES_PER_FRAG_BLOCK
    new_len = reassembler_mtu - (ip_pkt->ip_hl * sizeof(uint32_t));
    offset_increment = new_len / BYTES_PER_FRAG_BLOCK; // max payload frag blocks
    new_len = offset_increment * BYTES_PER_FRAG_BLOCK; // max payload len in bytes
    mtu = new_len + (ip_pkt->ip_hl * sizeof(uint32_t));

    mtu = htons(mtu);
    checksum_adjust((unsigned char *)&ip_pkt->ip_sum,
        (unsigned char *)&ip_pkt->ip_len, sizeof(uint16_t),
        (unsigned char *)&mtu, sizeof(uint16_t));

    ip_pkt->ip_len = mtu;
    mtu = ntohs(mtu);

    while(1) {

        len = jbuf_total_len(pkt_buf) + jbuf_total_len(fe->jb);
        if(len < mtu) {
            // safe to glue the 2 together; don't send it yet though
            jbuf_cat(pkt_buf, fe->jb);

        } else if(len == mtu) {

            // safe to glue the 2 together; send it now
            jbuf_cat(pkt_buf, fe->jb);

            if(pullup_bytes(&pkt_buf, sizeof(struct ip))) {
                DLOG(LOG_ERR, "%s: Not enough bytes to form an IP header "
                        "because a pullup failed (1).", __func__);
                goto failure;
            }
            ip_pkt = jbuf_to_d(pkt_buf, struct ip *);

            if(!TAILQ_EMPTY(&entry->flist)) { // if there's more to send...
                // save IP header in another jbuf for next fragment to send

                ip_hdr = jbuf_copychain(pkt_buf, 0, ip_pkt->ip_hl *
                        sizeof(uint32_t));
                if(ip_hdr == NULL) { // the copy failed
                    DLOG(LOG_ERR, "%s: Failed to copy IP header from packet"
                            " into a new buffer (1).", __func__);
                    goto failure;
                }

                ip_pkt->ip_off = htons(cur_frag_off);
                // ip_pkt->ip_off |= IP_MF should already be set

                cur_frag_off += offset_increment; // keep track of where we are

                checksum_adjust((unsigned char *)&ip_pkt->ip_sum,
                    (unsigned char *)&orig_ip_offset, sizeof(uint16_t),
                    (unsigned char *)&ip_pkt->ip_off, sizeof(uint16_t));

                // send it
                send_packet(pkt_buf, handle);
                pkt_buf = ip_hdr;

            } else { // there's nothing left to send after this
                // send it

                ip_pkt->ip_off = htons(cur_frag_off);
                ip_pkt->ip_off &= ~IP_MF; // unset MF

                checksum_adjust((unsigned char *)&ip_pkt->ip_sum,
                    (unsigned char *)&orig_ip_offset, sizeof(uint16_t),
                    (unsigned char *)&ip_pkt->ip_off, sizeof(uint16_t));

                send_packet(pkt_buf, handle);
                pkt_buf = NULL; // it was sent; don't send it again below
            }

        } else { // len > mtu
            // just add some of fe->jb to pkt_buf and send it

            len = mtu - jbuf_total_len(pkt_buf); // room left

            tmp = jbuf_split(fe->jb, len);
            if(tmp == NULL) {
                DLOG(LOG_ERR, "%s: Error splitting jbuf", __func__);
                jbuf_free(fe->jb);
                goto failure;
            }

            jbuf_cat(pkt_buf, fe->jb);

            // deal with remainder of the fragment that we split

            // save IP header in another jbuf for next fragment to send
            if(pullup_bytes(&pkt_buf, sizeof(struct ip))) {
                DLOG(LOG_ERR, "%s: Not enough bytes to form an IP header "
                        "because a pullup failed (2).", __func__);
                goto failure;
            }
            ip_pkt = jbuf_to_d(pkt_buf, struct ip *);
            ip_hdr = jbuf_copychain(pkt_buf, 0, ip_pkt->ip_hl *
                    sizeof(uint32_t));
            if(ip_hdr == NULL) { // the copy failed
                DLOG(LOG_ERR, "%s: Failed to copy IP header from packet into a "
                        "new buffer (2).", __func__);
                goto failure;
            }

            ip_pkt->ip_off = htons(cur_frag_off);
            // ip_pkt->ip_off |= IP_MF should already be set

            cur_frag_off += offset_increment; // keep track of where we are

            checksum_adjust((unsigned char *)&ip_pkt->ip_sum,
                (unsigned char *)&orig_ip_offset, sizeof(uint16_t),
                (unsigned char *)&ip_pkt->ip_off, sizeof(uint16_t));

            // send it
            send_packet(pkt_buf, handle);
            pkt_buf = ip_hdr;

            // fix fe->jb to point to the remainder after the split
            fe->jb = tmp;

            continue; // continue with fe because there's some more left in it
        }

        // we can free fe and get the next one

        msp_objcache_free(frag_handle, fe, cpu, obj_cache_id);

        fe = TAILQ_FIRST(&entry->flist); // go to next fragment in list
        if(!fe) { // the list is empty
            if(pkt_buf) { // ...but there's still something in the buffer
                // send what we have since it is all that is left

                // re-point at the IP header of the buffer we still hold;
                // its first jbuf is the pulled-up or copied header
                ip_pkt = jbuf_to_d(pkt_buf, struct ip *);

                // fix up the length to be whatever was left
                new_len = jbuf_total_len(pkt_buf);

                DLOG(LOG_INFO, "%s: Sending reassembled packet of length %d",
                        __func__, new_len);

                new_len = htons(new_len);

                checksum_adjust((unsigned char *)&ip_pkt->ip_sum,
                    (unsigned char *)&ip_pkt->ip_len, sizeof(uint16_t),
                    (unsigned char *)&new_len, sizeof(uint16_t));

                ip_pkt->ip_len = new_len;

                // fix up the offset
                ip_pkt->ip_off = htons(cur_frag_off);
                ip_pkt->ip_off &= ~IP_MF; // unset MF

                checksum_adjust((unsigned char *)&ip_pkt->ip_sum,
                    (unsigned char *)&orig_ip_offset, sizeof(uint16_t),
                    (unsigned char *)&ip_pkt->ip_off, sizeof(uint16_t));

                send_packet(pkt_buf, handle);
            }
            break;
        }
        TAILQ_REMOVE(&entry->flist, fe, entries);
    }

    return;

failure:

    if(pkt_buf) {
        jbuf_free(pkt_buf);
    }
    if(fe) {
        msp_objcache_free(frag_handle, fe, cpu, obj_cache_id);
    }

    while((fe = TAILQ_FIRST(&entry->flist)) != NULL) {
        TAILQ_REMOVE(&entry->flist, fe, entries);
        jbuf_free(fe->jb); // free the fragment's buffer, not just the entry
        msp_objcache_free(frag_handle, fe, cpu, obj_cache_id);
    }
}

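/*
 * process_fragment() implements hole-based reassembly in the spirit of
 * RFC 815. A flow is keyed by (src, dst, protocol, IP ID) and holds an
 * ordered fragment list; each fragment records the hole (if any) directly
 * before and after its payload, in 8-byte block units. A new fragment is
 * trimmed of any overlap with data we already hold, inserted in order, and
 * the neighbouring holes are updated. Once no holes remain, the list is
 * handed to send_fragment_list() and the entry is flagged for the ager.
 */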
static status_t
process_fragment(struct jbuf * pkt_buf,
                 const int cpu,
                 const msp_data_handle_t * const handle)
{
    struct jbuf * tmp;
    struct ip * ip_pkt = jbuf_to_d(pkt_buf, struct ip *);
    uint32_t hash;
    hash_bucket_t * bucket;
    table_entry_t * entry;
    struct timeval curtime;
    fragment_entry_t * fe, * fe_tmp, * next;
    uint8_t hdr_len = ip_pkt->ip_hl * sizeof(uint32_t);
    uint16_t payload_len = (ntohs(ip_pkt->ip_len) - hdr_len);
    boolean more_fragments = (ip_pkt->ip_off & htons(IP_MF)) ? true : false;
    uint16_t ip_id = ip_pkt->ip_id;
    uint16_t offset = ntohs(ip_pkt->ip_off) & IP_OFFMASK;
    uint16_t payload_end = 0, old_end = 0;
    uint16_t overlap_bytes = 0;
    boolean more_holes = false, fragment_inserted = false;

    // get hash of just the dest & src addresses + IP protocol + IP ID:
    //   xor the l3 hash with the id and trim to the hash output width
    hash = (ip_id ^ pkt_buf->jb_l3_hash) & HASH_MASK;

    // use hash to lookup a hash bucket and find the matching entry
    bucket = &flows_table->hash_bucket[hash];

    // Get the bucket lock
    INSIST_ERR(msp_spinlock_lock(&bucket->bucket_lock) == MSP_OK);

    entry = TAILQ_FIRST(&bucket->bucket_entries);

    while(entry != NULL) { // not likely many entries per bucket
        if(entry->daddr == ip_pkt->ip_dst.s_addr &&
           entry->saddr == ip_pkt->ip_src.s_addr &&
           entry->protocol == ip_pkt->ip_p &&
           entry->frag_group == ip_pkt->ip_id) {

            break; // match
        }
        entry = TAILQ_NEXT(entry, entries);
    }

    if(entry == NULL) {
        // if there's no matching entry, create one
        // we haven't seen a fragment yet (in flow)

        entry = msp_objcache_alloc(entry_handle, cpu, obj_cache_id);
        if(entry == NULL) {
            // Release the bucket lock
            INSIST_ERR(msp_spinlock_unlock(&bucket->bucket_lock) == MSP_OK);
            jbuf_free(pkt_buf);
            DLOG(LOG_ERR, "%s: Failed to allocate object cache for an entry.",
                    __func__);
            return EFAIL;
        }

        // init and grab lock
        msp_spinlock_init(&entry->lock);
        INSIST_ERR(msp_spinlock_lock(&entry->lock) == MSP_OK);

        TAILQ_INSERT_HEAD(&bucket->bucket_entries, entry, entries);

        // build key
        entry->daddr = ip_pkt->ip_dst.s_addr;
        entry->saddr = ip_pkt->ip_src.s_addr;
        entry->protocol = ip_pkt->ip_p;
        entry->frag_group = ip_id;

        // Release the bucket lock
        INSIST_ERR(msp_spinlock_unlock(&bucket->bucket_lock) == MSP_OK);

        // init the rest of the entry

        entry->total_len = 0;
        entry->free = 0;
        TAILQ_INIT(&entry->flist);

        if(!lw_getsystimes(&curtime, NULL)) {
            entry->age_ts = curtime.tv_sec;
        } else {
            DLOG(LOG_EMERG, "%s: Cannot get a timestamp", __func__);
        }

        DLOG(LOG_INFO, "%s: Created entry for id %d", __func__, ip_id);
    } else {
        // else there's a matching entry, so use it

        // Get the flow lock
        INSIST_ERR(msp_spinlock_lock(&entry->lock) == MSP_OK);

        // Release the bucket lock
        INSIST_ERR(msp_spinlock_unlock(&bucket->bucket_lock) == MSP_OK);
    }

    // put this jbuf in the fragment list
    fe = msp_objcache_alloc(frag_handle, cpu, obj_cache_id);
    if(fe == NULL) {
        // Release the entry lock
        INSIST_ERR(msp_spinlock_unlock(&entry->lock) == MSP_OK);
        jbuf_free(pkt_buf);
        DLOG(LOG_ERR, "%s: Failed to allocate object cache for a fragment"
                " entry.", __func__);
        return EFAIL;
    }

    fe->jb = jbuf_dup(pkt_buf);
    jbuf_free(pkt_buf);

    if(fe->jb == NULL) {
        INSIST_ERR(msp_spinlock_unlock(&entry->lock) == MSP_OK);
        msp_objcache_free(frag_handle, fe, cpu, obj_cache_id);
        DLOG(LOG_ERR, "%s: Error dupping jbuf (1)", __func__);
        return EFAIL;
    }

    // Chop off the IP header if it is not the first fragment
    if(offset != 0) {
        tmp = jbuf_split(fe->jb, hdr_len);
        if(tmp != NULL) {
            jbuf_free(fe->jb);
            fe->jb = tmp;
        } else {
            INSIST_ERR(msp_spinlock_unlock(&entry->lock) == MSP_OK);
            jbuf_free(fe->jb);
            msp_objcache_free(frag_handle, fe, cpu, obj_cache_id);
            DLOG(LOG_ERR, "%s: Error splitting jbuf (1)", __func__);
            return EFAIL;
        }

        payload_end = offset + (int)(ceil( // round up for the last-fragment case
                (float)payload_len / BYTES_PER_FRAG_BLOCK));

    } else {
        payload_end = offset + (payload_len / BYTES_PER_FRAG_BLOCK);

        // this is the first frag, so save the IP header;
        // add the IP hdr len back to payload_len because we're keeping it
        payload_len += hdr_len;
    }
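    /*
     * Worked example: a middle fragment with offset 185 and 1480 payload
     * bytes covers blocks 185..369, so payload_end = 185 + 1480/8 = 370.
     * The ceil() above only matters for the last fragment, whose payload
     * need not be a multiple of BYTES_PER_FRAG_BLOCK.
     */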

    // find where to insert this fragment in the fragment list

    fe_tmp = TAILQ_FIRST(&entry->flist);

    /*
     * Debug:
     *
    DLOG(LOG_INFO, "%s: id: %d, hl:%d, pl:%d, of:%d%s, pe:%d", __func__, ip_id,
            hdr_len, payload_len, offset, more_fragments?"+":"", payload_end);
     */

    if(!fe_tmp) { // list is empty ... easy case

        // now put the holes in
        if(offset == 0) { // It's the first fragment
            fe->is_leading_hole = false;
        } else {
            fe->is_leading_hole = true;
            fe->leading_hole.start = 0;
            fe->leading_hole.end = offset;
        }

        if(!more_fragments) { // It's the last fragment
            fe->is_trailing_hole = false;
        } else {
            fe->is_trailing_hole = true;
            if(offset == 0)
                fe->trailing_hole.start = offset +
                    ((payload_len - hdr_len) / BYTES_PER_FRAG_BLOCK);
            else
                fe->trailing_hole.start = offset +
                    (payload_len / BYTES_PER_FRAG_BLOCK);
            fe->trailing_hole.end = USHRT_MAX;
        }

        TAILQ_INSERT_HEAD(&entry->flist, fe, entries);
        more_holes = true;

    } else if(fe_tmp->is_leading_hole) { // hole at front of list

        // handle overlap first (hopefully rare)
        if(offset < fe_tmp->leading_hole.end &&
                fe_tmp->leading_hole.end < payload_end) {

            // some of it is before fe_tmp, but not all of it
            // trim the part we already have from the end of the fragment

            overlap_bytes = (payload_end - fe_tmp->leading_hole.end) *
                    BYTES_PER_FRAG_BLOCK;

            // recalculate desired payload
            payload_len -= overlap_bytes;
            payload_end = fe_tmp->leading_hole.end;

            tmp = jbuf_split(fe->jb, payload_len); // trim

            DLOG(LOG_INFO, "%s: Trimmed %d trailing overlap bytes (1)",
                    __func__, overlap_bytes);

            if(tmp != NULL) {
                jbuf_free(tmp); // free overlap
            } else {
                INSIST_ERR(msp_spinlock_unlock(&entry->lock) == MSP_OK);
                jbuf_free(fe->jb);
                msp_objcache_free(frag_handle, fe, cpu, obj_cache_id);
                DLOG(LOG_ERR, "%s: Error splitting jbuf (2)", __func__);
                return EFAIL;
            }

            // now payload_end == fe_tmp->leading_hole.end
            // we will process fe in the next if statement
        }

        if(fe_tmp->leading_hole.end >= payload_end) { // fe is before fe_tmp

            TAILQ_INSERT_HEAD(&entry->flist, fe, entries);
            fragment_inserted = true;

            if(offset == 0) {
                fe->is_leading_hole = false;
            } else {
                fe->is_leading_hole = true;
                fe->leading_hole.start = 0;
                fe->leading_hole.end = offset;
                more_holes = true;
            }

            if(payload_end == fe_tmp->leading_hole.end) {
                // fe comes right before fe_tmp
                fe_tmp->is_leading_hole = false;
                fe->is_trailing_hole = false;
            } else {
                // fe comes before fe_tmp but with a hole in between
                fe->is_trailing_hole = true;
                fe->trailing_hole.start = payload_end;
                fe_tmp->leading_hole.start = payload_end;
                fe->trailing_hole.end = fe_tmp->leading_hole.end;
                more_holes = true;
            }
            fe_tmp = fe;
        }
    }

    if(!more_holes) {

        while(!fragment_inserted && fe_tmp != NULL) {

            if(!fe_tmp->is_trailing_hole) {
                // fe is entirely past fe_tmp and not next to it
                fe_tmp = TAILQ_NEXT(fe_tmp, entries);
                continue;
            }

            // else there is a trailing hole

            if(offset >= fe_tmp->trailing_hole.end) {
                // it doesn't fit anywhere in this hole
                fe_tmp = TAILQ_NEXT(fe_tmp, entries);
                more_holes = true; // indicate we saw a hole
                continue;
            }

            if(payload_end <= fe_tmp->trailing_hole.start) {
                // fe fragment is useless to us, we already have the content
                more_holes = true; // indicate we saw a hole
                break;
            }

            // else: it fits somewhere in this hole...find where
            // and then break out of the loop

            // handle leading overlap first (hopefully rare)
            // beginning of fe overlaps with some of fe_tmp
            if(offset < fe_tmp->trailing_hole.start) {

                // trim the part we already have from the start

                overlap_bytes = (fe_tmp->trailing_hole.start - offset) *
                        BYTES_PER_FRAG_BLOCK;

                // recalculate desired payload and offset
                offset = fe_tmp->trailing_hole.start;
                payload_len -= overlap_bytes;

                tmp = jbuf_split(fe->jb, overlap_bytes); // trim

                DLOG(LOG_INFO, "%s: Trimmed %d leading overlap bytes",
                        __func__, overlap_bytes);

                if(tmp != NULL) {
                    jbuf_free(fe->jb); // free overlap
                    fe->jb = tmp;
                } else {
                    INSIST_ERR(msp_spinlock_unlock(&entry->lock) == MSP_OK);
                    jbuf_free(fe->jb);
                    msp_objcache_free(frag_handle, fe, cpu, obj_cache_id);
                    DLOG(LOG_ERR, "%s: Error splitting jbuf (3)", __func__);
                    return EFAIL;
                }

                // now offset == fe_tmp->trailing_hole.start
            }

            // fe comes somewhere after fe_tmp
            TAILQ_INSERT_AFTER(&entry->flist, fe_tmp, fe, entries);
            fragment_inserted = true;

            old_end = fe_tmp->trailing_hole.end;

            if(fe_tmp->trailing_hole.start == offset) {
                // fe comes right after fe_tmp
                fe_tmp->is_trailing_hole = false;
                fe->is_leading_hole = false;
            } else {
                fe->is_leading_hole = true;
                fe->leading_hole.start = fe_tmp->trailing_hole.start;
                fe->leading_hole.end = offset;
                fe_tmp->trailing_hole.end = offset;
                more_holes = true;
            }

            if(!more_fragments) { // it is the last fragment
                fe->is_trailing_hole = false;

                // in case there is anything after fe, we can get rid of it
                fe_tmp = TAILQ_NEXT(fe, entries);
                while(fe_tmp != NULL) {
                    next = TAILQ_NEXT(fe_tmp, entries);
                    jbuf_free(fe_tmp->jb);
                    TAILQ_REMOVE(&entry->flist, fe_tmp, entries);
                    msp_objcache_free(frag_handle, fe_tmp, cpu, obj_cache_id);
                    fe_tmp = next;
                    DLOG(LOG_INFO, "%s: Discarded a useless fragment from end "
                            "of chain", __func__);
                }
            } else {

                // handle trailing overlap with the next frag (hopefully rare)
                if(payload_end > old_end) {

                    // trim the part we already have from the end

                    overlap_bytes = (payload_end - old_end) *
                            BYTES_PER_FRAG_BLOCK;

                    // recalculate desired payload
                    payload_len -= overlap_bytes;
                    payload_end = old_end;

                    tmp = jbuf_split(fe->jb, payload_len); // trim

                    DLOG(LOG_INFO, "%s: Trimmed %d trailing overlap bytes (2)",
                            __func__, overlap_bytes);

                    if(tmp != NULL) {
                        jbuf_free(tmp); // free overlap
                    } else {
                        TAILQ_REMOVE(&entry->flist, fe, entries);

                        // restore fe_tmp back to how it was
                        next = TAILQ_NEXT(fe_tmp, entries);
                        fe_tmp->is_trailing_hole = true;
                        if(next)
                            fe_tmp->trailing_hole.end = next->leading_hole.end;
                        else
                            fe_tmp->trailing_hole.end = USHRT_MAX;

                        INSIST_ERR(msp_spinlock_unlock(&entry->lock) == MSP_OK);
                        jbuf_free(fe->jb);
                        msp_objcache_free(frag_handle, fe, cpu, obj_cache_id);

                        DLOG(LOG_ERR, "%s: Error splitting jbuf (4)", __func__);
                        return EFAIL;
                    }

                    // now payload_end == fe_tmp->trailing_hole.end
                    // we will process fe in the next if statement
                }

                // calculate the trailing hole for fe and then
                // the leading hole for fe->next
                fe->trailing_hole.start = payload_end;
                fe->trailing_hole.end = old_end;

                fe_tmp = TAILQ_NEXT(fe, entries);

                if(fe->trailing_hole.start == fe->trailing_hole.end) {
                    // filled the entire hole
                    fe->is_trailing_hole = false;
                    // insist on the following b/c trailing_hole.end
                    // would be infinity otherwise
                    INSIST_ERR(fe_tmp != NULL);
                    fe_tmp->is_leading_hole = false;
                    // will break below and scan the rest faster for holes
                } else {
                    // filled some of the hole
                    fe->is_trailing_hole = true;
                    if(fe_tmp)
                        fe_tmp->leading_hole.start = fe->trailing_hole.start;
                    more_holes = true;
                    fe_tmp = NULL; // no point in scanning more
                }
            }
            break;
        }

        // scan anything else quicker
        while(!more_holes && fe_tmp != NULL) {
            if(fe_tmp->is_trailing_hole) {
                more_holes = true;
                break;
            }
            fe_tmp = TAILQ_NEXT(fe_tmp, entries);
        }

        if(!more_holes) { // Done reassembly

            entry->total_len += payload_len;

            // Send out what we have reassembled

            DLOG(LOG_INFO, "%s: Sending a reassembled fragment", __func__);

            send_fragment_list(entry, cpu, handle);

            entry->free = 1; // flag telling the ager to clean this up

            // Release the entry lock
            INSIST_ERR(msp_spinlock_unlock(&entry->lock) == MSP_OK);

            return SUCCESS;
        }

        if(!fragment_inserted) {
            // this packet was a complete overlap of what we already had
            jbuf_free(fe->jb);
            msp_objcache_free(frag_handle, fe, cpu, obj_cache_id);
            DLOG(LOG_INFO, "%s: Discarded a useless fragment "
                    "(complete overlap)", __func__);
        }
    }

    /*
     * Print list (Debug):
     *

    fe_tmp = TAILQ_FIRST(&entry->flist);
    while(fe_tmp != NULL) {
        DLOG(LOG_INFO, "len: %d", jbuf_total_len(fe_tmp->jb));
        if(fe_tmp->is_leading_hole) {
            DLOG(LOG_INFO, "leading: %d - %d", fe_tmp->leading_hole.start,
                    fe_tmp->leading_hole.end);
        }
        if(fe_tmp->is_trailing_hole) {
            DLOG(LOG_INFO, "trailing: %d - %d", fe_tmp->trailing_hole.start,
                    fe_tmp->trailing_hole.end);
        }
        fe_tmp = TAILQ_NEXT(fe_tmp, entries);
    }
    DLOG(LOG_INFO, "Done printing fragment list");

    */

    if(!lw_getsystimes(&curtime, NULL)) {
        entry->age_ts = curtime.tv_sec;
    } else {
        DLOG(LOG_EMERG, "%s: Cannot get a timestamp", __func__);
    }

    entry->total_len += payload_len;

    // Release the entry lock
    INSIST_ERR(msp_spinlock_unlock(&entry->lock) == MSP_OK);

    return SUCCESS;
}

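/*
 * reassembler_process_packet() is the body of each packet loop (one per
 * data CPU): it pulls messages off the rx-FIFO, forwards non-fragments
 * unmodified, and runs fragments through process_fragment(), until
 * do_shutdown is observed.
 */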
static void *
reassembler_process_packet(msp_dataloop_args_t * params)
{
    struct jbuf * pkt_buf;
    struct ip * ip_pkt;
    int type, cpu;
    sigset_t sig_mask;

    // Block SIGTERM in this thread; the main thread handles it. Threads
    // inherit the creator's signal mask, so without this the signal
    // could be delivered here instead.
    sigemptyset(&sig_mask);
    sigaddset(&sig_mask, SIGTERM);
    pthread_sigmask(SIG_BLOCK, &sig_mask, NULL);

    atomic_add_uint(1, &loops_running);

    cpu = msp_data_get_cpu_num(params->dhandle);
    INSIST_ERR(cpu != MSP_NEXT_NONE);

    // Start the packet loop...
    while(!do_shutdown) {

        // Dequeue a packet from the rx-fifo
        pkt_buf = msp_data_recv(params->dhandle, &type);

        if(pkt_buf == NULL) { // Didn't get anything
            continue;
        }

        if(type != MSP_MSG_TYPE_PACKET) { // Didn't get network traffic
            DLOG(LOG_WARNING, "%s: Message wasn't a packet...dropping",
                __func__);
            jbuf_free(pkt_buf);
            continue;
        }

        if(pullup_bytes(&pkt_buf, sizeof(struct ip))) {

            DLOG(LOG_ERR, "%s: Dropped a packet because there's not enough "
                "bytes to form an IP header and a pullup failed.", __func__);

            jbuf_free(pkt_buf);
            continue;
        }

        // Get IP header
        ip_pkt = jbuf_to_d(pkt_buf, struct ip *);

        if(ip_pkt->ip_off & htons(IP_MF | IP_OFFMASK)) { // It's a fragment

            process_fragment(pkt_buf, cpu, &params->dhandle);

        } else {

            send_packet(pkt_buf, &params->dhandle);
        }
    }

    atomic_sub_uint(1, &loops_running);

    // thread is done if it reaches this point
    pthread_exit(NULL);
    return NULL;
}


/*** GLOBAL/EXTERNAL Functions ***/

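/*
 * init_packet_loops() sets up the shared-memory arena and the three object
 * caches (hash table, flow entries, fragment entries), initializes the flow
 * table, binds the main thread to a data CPU so it may safely take
 * spinlocks, starts a packet loop on each remaining data CPU, and arms the
 * aging timer.
 */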
status_t
init_packet_loops(evContext ctx)
{
    int i, cpu, rc;
    msp_dataloop_params_t params;
    msp_dataloop_result_t result;
    msp_shm_params_t shmp;
    msp_objcache_params_t ocp;

    shm_handle = NULL;
    table_handle = entry_handle = frag_handle = NULL;
    flows_table = NULL;
    evInitID(&aging_timer);
    obj_cache_id = 0; // for now this can always be zero

    LOG(LOG_INFO, "%s: Initializing object cache for data loops", __func__);

    bzero(&shmp, sizeof(shmp));
    bzero(&ocp, sizeof(ocp));

    // allocate & initialize the shared memory

    strncpy(shmp.shm_name, SHARED_MEM_NAME, SHM_NAME_LEN);

    if(msp_shm_allocator_init(&shmp) != MSP_OK) {
        LOG(LOG_ERR, "%s: Shared memory allocator initialization failed",
                __func__);
        return EFAIL;
    }

    shm_handle = shmp.shm; // get handle

    /*
    if(msp_trace_init(shm_handle)) { // need this for msp_log/DLOG
        LOG(LOG_ERR, "%s: Could not initialize shared memory for dCPU logging",
                __func__);
        return EFAIL;
    }
    */

    // create object cache allocator for the flow lookup table
    ocp.oc_shm = shm_handle;
    ocp.oc_size  = sizeof(hashtable_t);
    strncpy(ocp.oc_name, HASHTABLE_NAME, OC_NAME_LEN);

    if(msp_objcache_create(&ocp) != MSP_OK) {
        LOG(LOG_ERR, "%s: Object-cache allocator initialization failed (table)",
                __func__);
        return EFAIL;
    }

    table_handle = ocp.oc; // get handle

    // create object cache allocator for the flow lookup table entries
    ocp.oc_shm = shmp.shm;
    ocp.oc_size  = sizeof(table_entry_t);
    strncpy(ocp.oc_name, TABLE_ENTRY_NAME, OC_NAME_LEN);

    if (msp_objcache_create(&ocp) != MSP_OK) {
        LOG(LOG_ERR, "%s: Object-cache allocator initialization failed (entry)",
                __func__);
        return EFAIL;
    }

    entry_handle = ocp.oc; // get handle

    // create object cache allocator for the fragment entries
    ocp.oc_shm = shmp.shm;
    ocp.oc_size  = sizeof(fragment_entry_t);
    strncpy(ocp.oc_name, FRAGMENT_ENTRY_NAME, OC_NAME_LEN);

    if (msp_objcache_create(&ocp) != MSP_OK) {
        LOG(LOG_ERR, "%s: Object-cache allocator initialization failed "
                "(fragment entry)", __func__);
        return EFAIL;
    }

    frag_handle = ocp.oc; // get handle

    // allocate flows_table in the object cache:

    flows_table = msp_objcache_alloc(table_handle, msp_get_current_cpu(),
            obj_cache_id);

    if(flows_table == NULL) {
        LOG(LOG_ERR, "%s: Failed to allocate object cache for flows table",
                __func__);
        return EFAIL;
    }

    for(i = 0; i < FLOW_BUCKET_COUNT; ++i) {
        INSIST_ERR(msp_spinlock_init(&flows_table->hash_bucket[i].bucket_lock)
                == MSP_OK);
        TAILQ_INIT(&flows_table->hash_bucket[i].bucket_entries);
    }

    LOG(LOG_INFO, "%s: Starting packet loops...", __func__);

    bzero(&params, sizeof(msp_dataloop_params_t));
    bzero(&result, sizeof(msp_dataloop_result_t));

    loops_running = 0;
    do_shutdown = 0;

    // go through the available data CPUs and count them
    cpu = MSP_NEXT_NONE;
    i = 0; // count data CPUs
    while((cpu = msp_env_get_next_data_cpu(cpu)) != MSP_NEXT_END) {
        ++i;
    }

    if(i == 0) {
        LOG(LOG_ERR, "%s: No available data CPUs", __func__);
        return EFAIL;
    }

    cpu = MSP_NEXT_NONE;
    if((cpu = msp_env_get_next_data_cpu(cpu)) != MSP_NEXT_END) {

        // We actually bind this main thread of the process to a data CPU.
        // This means we use up a dCPU without a FIFO and packet loop running
        // on it, but on this dCPU, we guarantee that our thread runs in
        // real time, that is, without preemption.

        if(msp_process_bind(cpu)) {
            // This is bad because we need to acquire spinlocks
            LOG(LOG_ERR, "%s: Failed to bind the main thread of the process "
                    "to dCPU %d.", __func__, cpu);
            return EFAIL;
        } else {
            LOG(LOG_INFO, "%s: Bound the main thread of the process to dCPU %d",
                    __func__, cpu);
        }
    }

    // MIN_FIFO_DEPTH (1023) is the internal default FIFO depth.
    // Here we scale the depth depending on the number of data loops...

    // If we have 21 dCPUs (the max), then we would leave it at 1023; otherwise
    // add more space in the FIFOs because we will have fewer of them.

    --i; // we used the first dCPU without a packet loop (above)

    params.dl_fifo_depth = (int)(MIN_FIFO_DEPTH * (21.0f / i));

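    // For example, with 8 data CPUs left to run packet loops, the depth
    // becomes (int)(1023 * 21.0 / 8) = 2685 entries per FIFO.
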
    // create data loops on the remaining dCPUs
    while((cpu = msp_env_get_next_data_cpu(cpu)) != MSP_NEXT_END) {

        rc = msp_data_create_loop_on_cpu(cpu, reassembler_process_packet,
                &params, &result);

        if (rc != MSP_OK) {
            LOG(LOG_ERR, "%s: Could not start data loop on dCPU %d (err: %d).",
                    __func__, cpu, rc);
        }
    }

    LOG(LOG_INFO, "%s: Started %d packet loops with FIFO depth of %d",
            __func__, i, params.dl_fifo_depth);

    // start the ager on this ctrl thread... it will run in real time

    if(evSetTimer(ctx, aging_cleanup, NULL,
            evAddTime(evNowTime(), evConsTime(ENTRY_AGE_CHECK_INTERVAL, 0)),
            evConsTime(ENTRY_AGE_CHECK_INTERVAL, 0), &aging_timer)) {

        LOG(LOG_EMERG, "%s: Failed to initialize a timer to periodically "
            "check the age of flow entries (Error: %m)", __func__);
        return EFAIL;
    }

    return SUCCESS;
}

void
stop_packet_loops(evContext ctx)
{
    do_shutdown = 1;

    if(evTestID(aging_timer)) {
        evClearTimer(ctx, aging_timer);
        evInitID(&aging_timer);
    }

    while(loops_running > 0) ; // note the spinning while waiting

    // all loops must be shut down by this point

    if(flows_table) {
        msp_objcache_free(
                table_handle, flows_table, msp_get_current_cpu(), obj_cache_id);
        flows_table = NULL;
    }

    if(table_handle) {
        msp_objcache_destroy(table_handle);
        table_handle = NULL;
    }

    if(entry_handle) {
        msp_objcache_destroy(entry_handle);
        entry_handle = NULL;
    }

    if(frag_handle) { // also destroy the fragment-entry cache
        msp_objcache_destroy(frag_handle);
        frag_handle = NULL;
    }
}
