/** * \brief Incoming notification, that service has finished processing bundle * \param bundlemem Pointer to the MMEM struct of the bundle */ void routing_chain_bundle_delivered_locally(struct mmem * bundlemem) { struct routing_list_entry_t * n = NULL; struct routing_entry_t * entry = NULL; struct bundle_t * bundle = (struct bundle_t *) MMEM_PTR(bundlemem); // Tell the agent to call us again to resubmit bundles routing_chain_schedule_resubmission(); if( bundle == NULL ) { LOG(LOGD_DTN, LOG_ROUTE, LOGL_ERR, "flood_locally_delivered called with invalid pointer"); return; } // Find the bundle in our internal storage for( n = (struct routing_list_entry_t *) list_head(routing_list); n != NULL; n = list_item_next(n) ) { entry = (struct routing_entry_t *) MMEM_PTR(&n->entry); if( entry->bundle_number == bundle->bundle_num ) { break; } } if( n == NULL ) { LOG(LOGD_DTN, LOG_ROUTE, LOGL_ERR, "Bundle not in storage yet"); return; } // Unset the IN_DELIVERY flag entry->flags &= ~ROUTING_FLAG_IN_DELIVERY; // Unset the LOCAL flag entry->flags &= ~ROUTING_FLAG_LOCAL; // Unblock the receiving service delivery_unblock_service(bundlemem); // Free the bundle memory bundle_decrement(bundlemem); /* We count ourselves as node as well, so list us as receiver of a bundle copy */ if (entry->send_to < ROUTING_NEI_MEM) { linkaddr_copy(&entry->neighbours[entry->send_to], &linkaddr_node_addr); entry->send_to++; LOG(LOGD_DTN, LOG_ROUTE, LOGL_DBG, "bundle %lu sent to %u nodes", entry->bundle_number, entry->send_to); } else if (entry->send_to >= ROUTING_NEI_MEM) { // Here we can delete the bundle from storage, because it will not be routed anyway LOG(LOGD_DTN, LOG_ROUTE, LOGL_DBG, "bundle %lu sent to max number of nodes, deleting", entry->bundle_number); /* Unsetting the forward flag will make routing_flooding_check_keep_bundle delete the bundle */ entry->flags &= ~ROUTING_FLAG_FORWARD; } // Check remaining live of bundle routing_chain_check_keep_bundle(entry->bundle_number); }
/** * \brief Checks whether a bundle still has to be kept or can be deleted * \param bundle_number Number of the bundle */ void routing_chain_check_keep_bundle(uint32_t bundle_number) { struct routing_list_entry_t * n = NULL; struct routing_entry_t * entry = NULL; // Now we have to find the appropriate Storage struct for( n = (struct routing_list_entry_t *) list_head(routing_list); n != NULL; n = list_item_next(n) ) { entry = (struct routing_entry_t *) MMEM_PTR(&n->entry); if( entry->bundle_number == bundle_number ) { break; } } if( n == NULL ) { LOG(LOGD_DTN, LOG_ROUTE, LOGL_ERR, "Bundle not in storage yet"); return; } if( (entry->flags & ROUTING_FLAG_LOCAL) || (entry->flags & ROUTING_FLAG_FORWARD) ) { return; } LOG(LOGD_DTN, LOG_ROUTE, LOGL_INF, "Deleting bundle %lu", bundle_number); BUNDLE_STORAGE.del_bundle(bundle_number, REASON_DELIVERED); }
struct mmem * bundle_create_bundle() { int ret; struct bundle_slot_t *bs; struct bundle_t *bundle; bs = bundleslot_get_free(); if( bs == NULL ) { LOG(LOGD_DTN, LOG_BUNDLE, LOGL_ERR, "Could not allocate slot for a bundle"); return NULL; } ret = mmem_alloc(&bs->bundle, sizeof(struct bundle_t)); if (!ret) { bundleslot_free(bs); LOG(LOGD_DTN, LOG_BUNDLE, LOGL_ERR, "Could not allocate memory for a bundle"); return NULL; } bundle = (struct bundle_t *) MMEM_PTR(&bs->bundle); memset(bundle, 0, sizeof(struct bundle_t)); bundle->rec_time = xTaskGetTickCount(); bundle->num_blocks = 0; bundle->source_event_queue = dtn_process_get_event_queue(); configASSERT(bundle->source_event_queue != NULL); /* Bundles are created as singleton and with normal priority */ bundle->flags = BUNDLE_FLAG_SINGLETON | BUNDLE_PRIORITY_NORMAL; return &bs->bundle; }
/**
 * \brief Get the age of the bundle
 * \param bundlemem Bundle MMEM Pointer
 * \return Age in milliseconds (0 on invalid pointer)
 */
uint32_t bundle_ageing_get_age(struct mmem * bundlemem)
{
	struct bundle_t *bundle;
	udtn_timeval_t tv;

	/* Guard against a NULL MMEM handle */
	if( bundlemem == NULL ) {
		return 0;
	}

	bundle = (struct bundle_t *) MMEM_PTR(bundlemem);
	if( bundle == NULL ) {
		return 0;
	}

	// Bundle has a timestamp and we have time sync
	if( bundle->tstamp != 0 && udtn_getclockstate() >= UDTN_CLOCK_STATE_GOOD ) {
		// Get current time
		udtn_gettimeofday(&tv);

		// Convert into DTN time
		tv.tv_sec -= UDTN_CLOCK_DTN_EPOCH_OFFSET;

		// If our clock is ahead of the bundle timestamp the age seems to be 0
		if( tv.tv_sec < bundle->tstamp ) {
			return 0;
		}

		// Calculate age based on timestamp and current time
		// NOTE(review): (delta_sec * 1000) overflows a 32-bit result for ages
		// beyond ~49 days - presumably bounded by bundle lifetimes; verify.
		return (tv.tv_sec - bundle->tstamp) * 1000 + tv.tv_usec / 1000;
	}

	// We have to rely on the age block information:
	// age carried in the AEB plus the time the bundle has been held locally
	return bundle->aeb_value_ms + (xTaskGetTickCount() - bundle->rec_time) / portTICK_PERIOD_MS;
}
/**
 * \brief Appends a new block (type, flags, payload) to the end of a bundle
 * \param bundlemem Bundle MMEM pointer (the underlying memory may be moved by this call)
 * \param type Block type code
 * \param flags Block processing flags
 * \param data Payload to copy into the new block
 * \param d_len Payload length in bytes
 * \return d_len on success, -1 if the MMEM reallocation failed
 */
int bundle_add_block(struct mmem *bundlemem, uint8_t type, uint8_t flags, uint8_t *data, int d_len)
{
	struct bundle_t *bundle;
	struct bundle_block_t *block;
	uint8_t i;
	int n;

	/* Grow the bundle's MMEM area by one block header plus the payload.
	 * This may move the bundle in memory, so the struct pointer is
	 * re-derived afterwards. */
	n = mmem_realloc(bundlemem, bundlemem->size + d_len + sizeof(struct bundle_block_t));
	if( !n ) {
		return -1;
	}

	bundle = (struct bundle_t *) MMEM_PTR(bundlemem);

	/* FIXME: Make sure we don't traverse outside of our allocated memory */

	/* Go through the blocks until we're behind the last one */
	block = (struct bundle_block_t *) bundle->block_data;
	for (i=0;i<bundle->num_blocks;i++) {
		/* None of these is the last block anymore */
		block->flags &= ~BUNDLE_BLOCK_FLAG_LAST;
		block = (struct bundle_block_t *) &block->payload[block->block_size];
	}

	/* Initialize the freshly appended block; it becomes the new last block */
	block->type = type;
	block->flags = BUNDLE_BLOCK_FLAG_LAST | flags;
	block->block_size = d_len;
	bundle->num_blocks++;

	/* Copy the payload in place directly behind the block header */
	memcpy(block->payload, data, d_len);

	return d_len;
}
/**
 * \brief Sets a 32-bit attribute of a bundle
 * \param bundlemem Bundle MMEM pointer
 * \param attr Attribute identifier (FLAGS, DEST_NODE, ...)
 * \param val Pointer to the value to store
 * \return 1 on success, 0 for unknown attributes
 */
uint8_t bundle_set_attr(struct mmem *bundlemem, uint8_t attr, const uint32_t* const val)
{
	struct bundle_t *bundle = (struct bundle_t *) MMEM_PTR(bundlemem);
	LOG(LOGD_DTN, LOG_BUNDLE, LOGL_DBG, "set attr %lx",*val);
	switch (attr) {
		case FLAGS:
			bundle->flags = *val;
			/* Mirror the custody-transfer bit (0x08) into its own field */
			bundle->custody = 0x08 &(uint8_t) *val;
			break;
		case DEST_NODE:
			bundle->dst_node = *val;
			break;
		case DEST_SERV:
			bundle->dst_srv = *val;
			break;
		case SRC_NODE:
			bundle->src_node = *val;
			break;
		case SRC_SERV:
			bundle->src_srv = *val;
			break;
		case REP_NODE:
			bundle->rep_node = *val;
			break;
		case REP_SERV:
			bundle->rep_srv = *val;
			break;
		case CUST_NODE:
			bundle->cust_node = *val;
			break;
		case CUST_SERV:
			bundle->cust_srv = *val;
			break;
		case TIME_STAMP:
			bundle->tstamp = *val;
			break;
		case TIME_STAMP_SEQ_NR:
			bundle->tstamp_seq = *val;
			break;
		case LIFE_TIME:
			bundle->lifetime = *val;
			break;
		case DIRECTORY_LEN:
			/* CBHE bundles carry no dictionary; only 0 is acceptable */
			if (*val != 0)
				LOG(LOGD_DTN, LOG_BUNDLE, LOGL_ERR, "Dictionary length needs to be 0 for CBHE");
			break;
		case FRAG_OFFSET:
			bundle->frag_offs = *val;
			break;
		case LENGTH:
			/* FIXME */
		default:
			LOG(LOGD_DTN, LOG_BUNDLE, LOGL_ERR, "Unknown attribute");
			return 0;
	}
	return 1;
}
/**
 * \brief Reads a bundle attribute into a 64-bit output variable
 * \param bundlemem Bundle MMEM pointer
 * \param attr Attribute identifier (FLAGS, DEST_NODE, ...)
 * \param val Out: receives the attribute value
 * \return 1 on success, 0 for unknown attributes
 */
uint8_t bundle_get_attr_long(struct mmem *bundlemem, uint8_t attr, uint64_t *val)
{
	struct bundle_t *bundle = (struct bundle_t *) MMEM_PTR(bundlemem);
	// NOTE(review): this debug LOG prints *val BEFORE it is assigned (shows
	// the caller's previous contents) and uses %lx for a 64-bit value -
	// looks like leftover debugging; verify intent.
	LOG(LOGD_DTN, LOG_BUNDLE, LOGL_DBG, "get attr: %d in %lx", attr, *val);
	switch (attr) {
		case FLAGS:
			*val = bundle->flags;
			break;
		case DEST_NODE:
			*val = bundle->dst_node;
			break;
		case DEST_SERV:
			*val = bundle->dst_srv;
			break;
		case SRC_NODE:
			*val = bundle->src_node;
			break;
		case SRC_SERV:
			*val = bundle->src_srv;
			break;
		case REP_NODE:
			*val = bundle->rep_node;
			break;
		case REP_SERV:
			*val = bundle->rep_srv;
			break;
		case CUST_NODE:
			*val = bundle->cust_node;
			break;
		case CUST_SERV:
			*val = bundle->cust_srv;
			break;
		case TIME_STAMP:
			*val = bundle->tstamp;
			break;
		case TIME_STAMP_SEQ_NR:
			*val = bundle->tstamp_seq;
			break;
		case LIFE_TIME:
			*val = bundle->lifetime;
			break;
		case DIRECTORY_LEN:
			/* CBHE bundles have no dictionary */
			*val = 0;
			break;
		case FRAG_OFFSET:
			*val = bundle->frag_offs;
			break;
		case LENGTH:
			/* FIXME */
		default:
			LOG(LOGD_DTN, LOG_BUNDLE, LOGL_ERR, "Unknown attribute");
			return 0;
	}
	return 1;
}
uint8_t my_create_bundle(uint32_t sequence_number, uint32_t * bundle_number, uint32_t lifetime) { struct mmem * ptr = NULL; struct bundle_t * bundle = NULL; int n; uint32_t i; uint8_t payload[60]; uint32_t * bundle_number_ptr; ptr = bundle_create_bundle(); if( ptr == NULL ) { PRINTF("CREATE: Bundle %lu could not be allocated\n", sequence_number); return 0; } bundle = (struct bundle_t *) MMEM_PTR(ptr); if( bundle == NULL ) { PRINTF("CREATE: Bundle %lu could not be allocated\n", sequence_number); return 0; } // Set all attributes for(i=VERSION; i<=FRAG_OFFSET; i++) { bundle_set_attr(ptr, i, &i); } // But set the sequence number to something monotonically increasing bundle_set_attr(ptr, TIME_STAMP_SEQ_NR, &sequence_number); // Set the lifetime bundle_set_attr(ptr, LIFE_TIME, &lifetime); // Fill the payload for(i=0; i<60; i++) { payload[i] = i + (uint8_t) sequence_number; } // Add a payload block bundle_add_block(ptr, BUNDLE_BLOCK_TYPE_PAYLOAD, BUNDLE_BLOCK_FLAG_NULL, payload, 60); // Calculate the bundle number bundle->bundle_num = HASH.hash_convenience(bundle->tstamp_seq, bundle->tstamp, bundle->src_node, bundle->src_srv, bundle->frag_offs, bundle->app_len); // And tell storage to save the bundle n = BUNDLE_STORAGE.save_bundle(ptr, &bundle_number_ptr); if( !n ) { PRINTF("CREATE: Bundle %lu could not be created\n", sequence_number); return 0; } // Copy over the bundle number *bundle_number = *bundle_number_ptr; return 1; }
/**
 * \brief Decodes one canonical block from wire format and appends it to the bundle
 * \param bundlemem Bundle MMEM pointer (may be reallocated/moved by this call)
 * \param buffer Serialized block data
 * \param max_len Number of valid bytes in buffer
 * \return Number of bytes consumed from buffer, 0 on error
 */
static size_t bundle_decode_block(struct mmem* const bundlemem, const uint8_t* const buffer, const size_t max_len)
{
	uint8_t type;
	int block_offs = 0;
	size_t offs = 0;
	uint32_t flags, size;
	struct bundle_t *bundle;
	struct bundle_block_t *block;
	int n;

	/* Block type is a plain byte, not an SDNV */
	type = buffer[offs];
	offs++;

	/* Flags */
	offs += sdnv_decode(&buffer[offs], max_len-offs, &flags);

	/* Payload Size */
	offs += sdnv_decode(&buffer[offs], max_len-offs, &size);
	if (size > max_len-offs) {
		LOG(LOGD_DTN, LOG_BUNDLE, LOGL_ERR, "Bundle payload length too big: %lu > %lu", size, max_len-offs);
		return 0;
	}

	/* Remember the current end of the bundle BEFORE the realloc below -
	 * the new block is placed at this offset */
	block_offs = bundlemem->size;

	if( type == BUNDLE_BLOCK_TYPE_AEB ) {
		/* Age blocks are parsed into bundle->aeb_value_ms instead of being
		 * stored as a regular block */
		// TODO remove const cast
		return offs + bundle_ageing_parse_age_extension_block(bundlemem, type, flags, (uint8_t*)&buffer[offs], size);
	}

	/* Grow the bundle to hold the new block header plus payload;
	 * this may move the bundle, so the pointer is re-derived afterwards */
	n = mmem_realloc(bundlemem, bundlemem->size + sizeof(struct bundle_block_t) + size);
	if( !n ) {
		LOG(LOGD_DTN, LOG_BUNDLE, LOGL_ERR, "Bundle payload length too big for MMEM.");
		return 0;
	}
	bundle = (struct bundle_t *) MMEM_PTR(bundlemem);
	bundle->num_blocks++;

	/* Add the block to the end of the bundle */
	block = (struct bundle_block_t *)((uint8_t *)bundle + block_offs);
	block->type = type;
	block->flags = flags;
	block->block_size = size;

	/* Copy the actual payload over */
	memcpy(block->payload, &buffer[offs], block->block_size);

	return offs + block->block_size;
}
/**
 * \brief Returns a pointer to the i-th block of a bundle
 * \param bundlemem Bundle MMEM pointer
 * \param i Zero-based block index
 * \return Pointer to the block, NULL if the index is out of range
 */
struct bundle_block_t *bundle_get_block(struct mmem *bundlemem, uint8_t i)
{
	struct bundle_t *bundle = (struct bundle_t *) MMEM_PTR(bundlemem);
	struct bundle_block_t *cursor;
	uint8_t step;

	if (i >= bundle->num_blocks)
		return NULL;

	/* Hop along the chain of variable-sized blocks until index i is reached */
	cursor = (struct bundle_block_t *) bundle->block_data;
	for (step = 0; step < i; step++) {
		cursor = (struct bundle_block_t *) &cursor->payload[cursor->block_size];
	}

	return cursor;
}
/** * \brief deletes bundle from list * \param bundle_number bundle number of the bundle */ void routing_chain_delete_bundle(uint32_t bundle_number) { struct routing_list_entry_t * n = NULL; struct routing_entry_t * entry = NULL; LOG(LOGD_DTN, LOG_ROUTE, LOGL_DBG, "flood_del_bundle for bundle %lu", bundle_number); // Find the bundle in our internal storage for( n = list_head(routing_list); n != NULL; n = list_item_next(n) ) { entry = (struct routing_entry_t *) MMEM_PTR(&n->entry); if( entry->bundle_number == bundle_number ) { break; } } if( n == NULL ) { LOG(LOGD_DTN, LOG_ROUTE, LOGL_ERR, "flood_del_bundle for bundle %lu that we do not know", bundle_number); return; } memset(MMEM_PTR(&n->entry), 0, sizeof(struct routing_entry_t)); // Free up the memory for the struct mmem_free(&n->entry); list_remove(routing_list, n); memset(n, 0, sizeof(struct routing_list_entry_t)); // And also free the memory for the list entry memb_free(&routing_mem, n); }
/**
 * \brief Finds the first block of a given type within a bundle
 * \param bundlemem Bundle MMEM pointer
 * \param type Block type to look for
 * \return Pointer to the first matching block, NULL if none exists
 */
struct bundle_block_t *bundle_get_block_by_type(struct mmem *bundlemem, uint8_t type)
{
	struct bundle_t *bundle = (struct bundle_t *) MMEM_PTR(bundlemem);
	struct bundle_block_t *cursor = (struct bundle_block_t *) bundle->block_data;
	int idx;

	/* Linear scan over the block chain; first match wins */
	for(idx = 0; idx < bundle->num_blocks; idx++) {
		if( cursor->type == type ) {
			return cursor;
		}
		cursor = (struct bundle_block_t *) &cursor->payload[cursor->block_size];
	}

	/* No block of the requested type present */
	return NULL;
}
/**
 * \brief Parses the age extension block
 * \param bundlemem Bundle MMEM Pointer
 * \param type Block type
 * \param flags Block Flags (currently unused)
 * \param buffer Block Payload Pointer
 * \param length Block Payload Length
 * \return Length of parsed block payload, 0 on error
 */
uint8_t bundle_ageing_parse_age_extension_block(struct mmem *bundlemem, uint8_t type, uint32_t flags, uint8_t * buffer, int length)
{
	uint8_t offset = 0;
	struct bundle_t *bundle;

	/* Check for the proper block type */
	if( type != BUNDLE_BLOCK_TYPE_AEB ) {
		return 0;
	}

	if( bundlemem == NULL ) {
		return 0;
	}

	bundle = (struct bundle_t *) MMEM_PTR(bundlemem);
	if( bundle == NULL ) {
		return 0;
	}

#if UDTN_SUPPORT_LONG_AEB
	/* Decode the age block value */
	if( sdnv_len(buffer) > 4 ) {
		// 64 bit operations are expensive - avoid them where possible
		uint64_t age = 0;
		offset = sdnv_decode_long(buffer, length, &age);

		// Convert Age to milliseconds
		// (matches the encoder, which multiplies the ms value by 1000)
		bundle->aeb_value_ms = (uint32_t) (age / 1000);
	} else {
		uint32_t age = 0;
		offset = sdnv_decode(buffer, length, &age);

		// Convert Age to milliseconds
		bundle->aeb_value_ms = age / 1000;
	}
#else
	uint32_t age = 0;
	offset = sdnv_decode(buffer, length, &age);

	// Convert Age to milliseconds
	bundle->aeb_value_ms = age / 1000;
#endif

	return offset;
}
/**
 * \brief Sets a bundle attribute from a 64-bit value
 * \param bundlemem Bundle MMEM pointer
 * \param attr Attribute identifier
 * \param val Pointer to the value to store
 * \return 1 on success, 0 for attributes without 64-bit support
 */
uint8_t bundle_set_attr_long(struct mmem *bundlemem, uint8_t attr, const uint64_t* const val)
{
	struct bundle_t *bundle = (struct bundle_t *) MMEM_PTR(bundlemem);
	// NOTE(review): %lx with a uint64_t argument - format/argument mismatch
	// on platforms where long is 32 bit; verify the LOG macro's behavior.
	LOG(LOGD_DTN, LOG_BUNDLE, LOGL_DBG, "set attr %lx",*val);
	switch (attr) {
		case DEST_NODE:
			bundle->dst_node = *val;
			break;
		case DEST_SERV:
			bundle->dst_srv = *val;
			break;
		case SRC_NODE:
			bundle->src_node = *val;
			break;
		case SRC_SERV:
			bundle->src_srv = *val;
			break;
		case REP_NODE:
			bundle->rep_node = *val;
			break;
		case REP_SERV:
			bundle->rep_srv = *val;
			break;
		case CUST_NODE:
			bundle->cust_node = *val;
			break;
		case CUST_SERV:
			bundle->cust_srv = *val;
			break;
		case TIME_STAMP:
			bundle->tstamp = *val;
			break;
		case TIME_STAMP_SEQ_NR:
			bundle->tstamp_seq = *val;
			break;
		case LIFE_TIME:
			bundle->lifetime = *val;
			break;
		default:
			LOG(LOGD_DTN, LOG_BUNDLE, LOGL_ERR, "Unknown attribute. Possibly no support for 64 bit values.");
			return 0;
	}
	return 1;
}
/** * \brief Figure out if the bundle is expired * \param bundlemem Bundle MMEM Pointer * \return 1 = expired, 0 = not expired */ uint8_t bundle_ageing_is_expired(struct mmem * bundlemem) { struct bundle_t *bundle; uint32_t age = 0; if( bundlemem == NULL ) { return 0; } bundle = (struct bundle_t *) MMEM_PTR(bundlemem); if( bundle == NULL ) { return 0; } /* Check age based on age block */ age = bundle_ageing_get_age(bundlemem); if( (age / 1000) > bundle->lifetime ) { LOG(LOGD_DTN, LOG_BUNDLE, LOGL_DBG, "Expired (create %u, life %u, aeb %u, rec %u, time %u, age %u)", bundle->tstamp, bundle->lifetime, bundle->aeb_value_ms, bundle->rec_time, xTaskGetTickCount(), age); return 1; } return 0; }
int convergence_layer_send_bundle(struct transmit_ticket_t * ticket) { struct bundle_t *bundle = NULL; uint16_t length = 0; uint8_t * buffer = NULL; uint8_t buffer_length = 0; #if CONVERGENCE_LAYER_SEGMENTATION int ret; int segments; #endif /* CONVERGENCE_LAYER_SEGMENTATION */ LOG(LOGD_DTN, LOG_CL, LOGL_DBG, "Sending bundle %lu to %u.%u with ticket %p", ticket->bundle_number, ticket->neighbour.u8[0], ticket->neighbour.u8[1], ticket); if( !(ticket->flags & CONVERGENCE_LAYER_QUEUE_MULTIPART) ) { /* Read the bundle from storage, if it is not in memory */ if( ticket->bundle == NULL ) { ticket->bundle = BUNDLE_STORAGE.read_bundle(ticket->bundle_number); if( ticket->bundle == NULL ) { LOG(LOGD_DTN, LOG_CL, LOGL_ERR, "Unable to read bundle %lu", ticket->bundle_number); /* FIXME: Notify somebody */ return -1; } } /* Get our bundle struct and check the pointer */ bundle = (struct bundle_t *) MMEM_PTR(ticket->bundle); if( bundle == NULL ) { LOG(LOGD_DTN, LOG_CL, LOGL_ERR, "Invalid bundle pointer for bundle %lu", ticket->bundle_number); bundle_decrement(ticket->bundle); ticket->bundle = NULL; return -1; } /* Check if bundle has expired */ if( bundle_ageing_is_expired(ticket->bundle) ) { LOG(LOGD_DTN, LOG_CL, LOGL_INF, "Bundle %lu has expired, not sending it", ticket->bundle_number); /* Bundle is expired */ bundle_decrement(ticket->bundle); /* Tell storage to delete - it will take care of the rest */ BUNDLE_STORAGE.del_bundle(ticket->bundle_number, REASON_LIFETIME_EXPIRED); return -1; } } /* Get the outgoing network buffer */ buffer = dtn_network_get_buffer(); if( buffer == NULL ) { bundle_decrement(ticket->bundle); ticket->bundle = NULL; return -1; } /* Get the buffer length */ buffer_length = dtn_network_get_buffer_length(); #if CONVERGENCE_LAYER_SEGMENTATION /* We have to use a heuristic to estimate if the bundle will be a multipart bundle */ if( ticket->bundle->size > CONVERGENCE_LAYER_MAX_LENGTH && !(ticket->flags & CONVERGENCE_LAYER_QUEUE_MULTIPART) ) { /* This is a 
bundle for multiple segments and we have our first look at it */ ticket->flags |= CONVERGENCE_LAYER_QUEUE_MULTIPART; LOG(LOGD_DTN, LOG_CL, LOGL_DBG, "Encoding multipart bundle %lu", ticket->bundle_number); /* Now allocate a buffer to serialize the bundle * The size is a rough estimation here and will be reallocated later on */ ret = mmem_alloc(&ticket->buffer, ticket->bundle->size); if( ret < 1 ) { LOG(LOGD_DTN, LOG_CL, LOGL_ERR, "Multipart bundle %lu could not be encoded, not enough memory for %u bytes", ticket->bundle_number, ticket->bundle->size); ticket->flags &= ~CONVERGENCE_LAYER_QUEUE_MULTIPART; return -1; } /* Encode the bundle into our temporary buffer */ length = bundle_encode_bundle(ticket->bundle, (uint8_t *) MMEM_PTR(&ticket->buffer), ticket->buffer.size); if( length < 0 ) { LOG(LOGD_DTN, LOG_CL, LOGL_ERR, "Multipart bundle %lu could not be encoded, error occured", ticket->bundle_number); mmem_free(&ticket->buffer); ticket->buffer.ptr = NULL; ticket->flags &= ~CONVERGENCE_LAYER_QUEUE_MULTIPART; return -1; } /* Decrease memory size to what is actually needed */ ret = mmem_realloc(&ticket->buffer, length); if( ret < 1 ) { LOG(LOGD_DTN, LOG_CL, LOGL_ERR, "Multipart bundle %lu could not be encoded, realloc failed", ticket->bundle_number); mmem_free(&ticket->buffer); ticket->buffer.ptr = NULL; ticket->flags &= ~CONVERGENCE_LAYER_QUEUE_MULTIPART; return -1; } /* We do not need the original bundle anymore */ bundle_decrement(ticket->bundle); ticket->bundle = NULL; /* Initialize the state for this bundle */ ticket->offset_sent = 0; ticket->offset_acked = 0; ticket->sequence_number = outgoing_sequence_number; /* Calculate the number of segments we will need */ segments = (length + 0.5 * CONVERGENCE_LAYER_MAX_LENGTH) / CONVERGENCE_LAYER_MAX_LENGTH; /* And reserve the sequence number space for this bundle to allow for consequtive numbers */ outgoing_sequence_number = (outgoing_sequence_number + segments) % 4; } /* Initialize the header field */ buffer[0] = 
CONVERGENCE_LAYER_TYPE_DATA & CONVERGENCE_LAYER_MASK_TYPE; /* Check if this is a multipart bundle */ if( ticket->flags & CONVERGENCE_LAYER_QUEUE_MULTIPART ) { /* Calculate the remaining length */ length = ticket->buffer.size - ticket->offset_acked; /* Is it possible, that we send a single-part bundle here because the heuristic * from above failed. So be it. */ if( length <= CONVERGENCE_LAYER_MAX_LENGTH && ticket->offset_acked == 0 ) { /* One bundle per segment, standard flags */ buffer[0] |= (CONVERGENCE_LAYER_FLAGS_FIRST | CONVERGENCE_LAYER_FLAGS_LAST) & CONVERGENCE_LAYER_MASK_FLAGS; } else if( ticket->offset_acked == 0 ) { /* First segment of a bundle */ buffer[0] |= CONVERGENCE_LAYER_FLAGS_FIRST & CONVERGENCE_LAYER_MASK_FLAGS; } else if( length <= CONVERGENCE_LAYER_MAX_LENGTH ) { /* Last segment of a bundle */ buffer[0] |= CONVERGENCE_LAYER_FLAGS_LAST & CONVERGENCE_LAYER_MASK_FLAGS; } else if( length > CONVERGENCE_LAYER_MAX_LENGTH) { /* A segment in the middle of a bundle */ buffer[0] &= ~CONVERGENCE_LAYER_MASK_FLAGS; } /* one byte for the CL header */ length += 1; if( length > CONVERGENCE_LAYER_MAX_LENGTH ) { length = CONVERGENCE_LAYER_MAX_LENGTH; } if( length > buffer_length ) { length = buffer_length; } /* Copy the subset of the bundle into the buffer */ memcpy(buffer + 1, ((uint8_t *) MMEM_PTR(&ticket->buffer)) + ticket->offset_acked, length - 1); /* Every segment so far has been acked */ if( ticket->offset_sent == ticket->offset_acked ) { /* It is the first time that we are sending this segment */ ticket->offset_sent += length - 1; /* Increment the sequence number for the new segment, except for the first segment */ if( ticket->offset_sent != 0 ) { ticket->sequence_number = (ticket->sequence_number + 1) % 4; } } } else { #endif /* CONVERGENCE_LAYER_SEGMENTATION */ /* one byte for the CL header */ length = 1; /* Initialize the header field */ buffer[0] = CONVERGENCE_LAYER_TYPE_DATA & CONVERGENCE_LAYER_MASK_TYPE; /* One bundle per segment, standard flags */ 
buffer[0] |= (CONVERGENCE_LAYER_FLAGS_FIRST | CONVERGENCE_LAYER_FLAGS_LAST) & CONVERGENCE_LAYER_MASK_FLAGS; /* Encode the bundle into the buffer */ length += bundle_encode_bundle(ticket->bundle, buffer + 1, buffer_length - 1); /* Initialize the sequence number */ ticket->sequence_number = outgoing_sequence_number; outgoing_sequence_number = (outgoing_sequence_number + 1) % 4; #if CONVERGENCE_LAYER_SEGMENTATION } #endif /* CONVERGENCE_LAYER_SEGMENTATION */ /* Put the sequence number for this bundle into the outgoing header */ buffer[0] |= (ticket->sequence_number << 2) & CONVERGENCE_LAYER_MASK_SEQNO; /* Flag the bundle as being in transit now */ ticket->flags |= CONVERGENCE_LAYER_QUEUE_IN_TRANSIT; /* Now we are transmitting */ convergence_layer_transmitting = 1; /* This neighbour is blocked, until we have received the App Layer ACK or NACK */ convergence_layer_set_blocked(&ticket->neighbour); /* And send it out */ dtn_network_send(&ticket->neighbour, length, (void *) ticket); return 1; }
/**
 * \brief Encodes the age extension block
 * \param bundlemem Bundle MMEM Pointer
 * \param buffer Block Payload Pointer
 * \param max_len Block Payload Length
 * \return Length of encoded block payload, 0 on error
 */
uint8_t bundle_ageing_encode_age_extension_block(struct mmem *bundlemem, uint8_t *buffer, int max_len)
{
	struct bundle_t *bundle;
	uint32_t length = 0;
	uint8_t offset = 0;
	uint8_t tmpbuffer[10];
	uint32_t flags = 0;
	int ret;

	/* Guard against invalid handles */
	if( bundlemem == NULL ) {
		return 0;
	}

	bundle = (struct bundle_t *) MMEM_PTR(bundlemem);
	if( bundle == NULL ) {
		return 0;
	}

#if UDTN_SUPPORT_LONG_AEB
	/* Update the age value
	 * 4294967 = 0xFFFFFFFF / 1000 */
	if( bundle_ageing_get_age(bundlemem) > 4294967 ) {
		// Keep use of 64 bit data types as low as possible for performance reasons
		uint64_t age = 0;
		age = ((uint64_t) bundle_ageing_get_age(bundlemem)) * ((uint64_t) 1000);
		length = sdnv_encode_long(age, tmpbuffer, 10);
	} else {
		uint32_t age = 0;
		age = bundle_ageing_get_age(bundlemem) * 1000;
		length = sdnv_encode(age, tmpbuffer, 10);
	}
#else
	uint32_t age = 0;
	age = bundle_ageing_get_age(bundlemem) * 1000;
	length = sdnv_encode(age, tmpbuffer, 10);
#endif

	/* Encode the next block */
	buffer[offset] = BUNDLE_BLOCK_TYPE_AEB;
	offset++;

	/* Flags */
	flags = BUNDLE_BLOCK_FLAG_REPL;
	ret = sdnv_encode(flags, &buffer[offset], max_len - offset);
	if (ret < 0) {
		return 0;
	}
	offset += ret;

	/* Blocksize */
	ret = sdnv_encode(length, &buffer[offset], max_len - offset);
	if (ret < 0) {
		return 0;
	}
	offset += ret;

	/* Payload */
	// NOTE(review): no check that (max_len - offset) >= length before this
	// copy - presumably callers always provide enough headroom; verify.
	memcpy(&buffer[offset], tmpbuffer, length);
	offset += length;

	return offset;
}
/**
 * \brief Reconstructs a bundle struct from its RFC5050 wire representation
 * \param buffer Serialized bundle data
 * \param size Number of valid bytes in buffer
 * \return Newly allocated bundle MMEM pointer, NULL on parse/alloc error
 */
struct mmem *bundle_recover_bundle(const uint8_t* const buffer, const size_t size)
{
	uint32_t primary_size, value;
	size_t offs = 0;
	struct mmem *bundlemem;
	struct bundle_t *bundle;
	int ret = 0;

	bundlemem = bundle_create_bundle();
	if (!bundlemem)
		return NULL;
	bundle = (struct bundle_t *) MMEM_PTR(bundlemem);

	LOG(LOGD_DTN, LOG_BUNDLE, LOGL_DBG, "rec bptr: %p blptr:%p",bundle,buffer);

	/* Version 0x06 is the one described and supported in RFC5050 */
	if (buffer[0] != 0x06) {
		LOG(LOGD_DTN, LOG_BUNDLE, LOGL_ERR, "Version 0x%02x not supported", buffer[0]);
		goto err;
	}
	offs++;

	/* Flags */
	// NOTE(review): the sdnv_decode return values below are added to offs
	// without error checks - presumably sdnv_decode never fails on bounded
	// input; verify against its implementation.
	offs += sdnv_decode(&buffer[offs], size-offs, &bundle->flags);

	/* Block Length - Number of bytes in this block following this
	 * field */
	offs += sdnv_decode(&buffer[offs], size-offs, &primary_size);
	/* Turn the length into the absolute end offset of the primary block */
	primary_size += offs;

	/*
	 * Use temp variable, otherwise raises hard fault exception.
	 * Variable is not aligned for correct offset,
	 * because of packed attribute.
	 * For example address has to be a multiply of 8.
	 * But packed attribute is needed,
	 * because of memory allocation for the payload block
	 */
	uint64_t sdnv_temp = 0;

	/* Destination node + SSP */
	offs += sdnv_decode(&buffer[offs], size-offs, &bundle->dst_node);
	offs += sdnv_decode_long(&buffer[offs], size-offs, &sdnv_temp);
	bundle->dst_srv = sdnv_temp;

	/* Source node + SSP */
	offs += sdnv_decode(&buffer[offs], size-offs, &bundle->src_node);
	offs += sdnv_decode_long(&buffer[offs], size-offs, &sdnv_temp);
	bundle->src_srv = sdnv_temp;

	/* Report-to node + SSP */
	offs += sdnv_decode(&buffer[offs], size-offs, &bundle->rep_node);
	offs += sdnv_decode(&buffer[offs], size-offs, &bundle->rep_srv);

	/* Custodian node + SSP */
	offs += sdnv_decode(&buffer[offs], size-offs, &bundle->cust_node);
	offs += sdnv_decode(&buffer[offs], size-offs, &bundle->cust_srv);

	/* Creation Timestamp */
	offs += sdnv_decode_long(&buffer[offs], size-offs, &sdnv_temp);
	bundle->tstamp = sdnv_temp;

	/* Creation Timestamp Sequence Number */
	offs += sdnv_decode(&buffer[offs], size-offs, &bundle->tstamp_seq);

	/* Lifetime */
	offs += sdnv_decode(&buffer[offs], size-offs, &bundle->lifetime);

	/* Directory Length */
	offs += sdnv_decode(&buffer[offs], size-offs, &value);
	if (value != 0) {
		/* Only CBHE (compressed, dictionary-less) bundles are supported */
		LOG(LOGD_DTN, LOG_BUNDLE, LOGL_ERR, "Bundle does not use CBHE.");
		goto err;
	}

	if (bundle->flags & BUNDLE_FLAG_FRAGMENT) {
		LOG(LOGD_DTN, LOG_BUNDLE, LOGL_INF, "Bundle is a fragment");

		/* Fragment Offset */
		offs += sdnv_decode(&buffer[offs], size-offs, &bundle->frag_offs);

		/* Total App Data Unit Length */
		offs += sdnv_decode(&buffer[offs], size-offs, &bundle->app_len);
	}

	/* The consumed bytes must match the announced primary block length */
	if (offs != primary_size) {
		LOG(LOGD_DTN, LOG_BUNDLE, LOGL_ERR, "Problem decoding the primary bundle block.");
		goto err;
	}

	/* FIXME: Loop around and decode all blocks - does this work? */
	while (size-offs > 1) {
		ret = bundle_decode_block(bundlemem, &buffer[offs], size-offs);

		/* If block decode failed, we are out of memory and have to abort */
		if( ret < 1 ) {
			goto err;
		}

		offs += ret;
	}

	return bundlemem;

err:
	/* Release the partially built bundle on any error */
	bundle_delete_bundle(bundlemem);
	return NULL;
}
/** * \brief Callback function informing us about the status of a sent bundle * \param ticket CL transmit ticket of the bundle * \param status status code */ void routing_chain_bundle_sent(struct transmit_ticket_t * ticket, uint8_t status) { struct routing_list_entry_t * n = NULL; struct routing_entry_t * entry = NULL; // Tell the agent to call us again to resubmit bundles routing_chain_schedule_resubmission(); // Find the bundle in our internal storage for( n = list_head(routing_list); n != NULL; n = list_item_next(n) ) { entry = (struct routing_entry_t *) MMEM_PTR(&n->entry); if( entry->bundle_number == ticket->bundle_number ) { break; } } if( n == NULL ) { /* Free up the ticket */ convergence_layer_free_transmit_ticket(ticket); LOG(LOGD_DTN, LOG_ROUTE, LOGL_ERR, "Bundle not in storage"); return; } /* Bundle is not busy anymore */ entry->flags &= ~ROUTING_FLAG_IN_TRANSIT; if( status == ROUTING_STATUS_NACK || status == ROUTING_STATUS_FAIL ) { // NACK = Other side rejected the bundle, try again later // FAIL = Transmission failed /* Free up the ticket */ convergence_layer_free_transmit_ticket(ticket); return; } if( status == ROUTING_STATUS_ERROR ) { LOG(LOGD_DTN, LOG_ROUTE, LOGL_ERR, "Bundle %lu has fatal error, deleting", ticket->bundle_number); /* Bundle failed permanently, we can delete it because it will never be delivered anyway */ entry->flags = 0; routing_chain_check_keep_bundle(ticket->bundle_number); /* Free up the ticket */ convergence_layer_free_transmit_ticket(ticket); return; } // Here: status == ROUTING_STATUS_OK statistics_bundle_outgoing(1); #ifndef TEST_DO_NOT_DELETE_ON_DIRECT_DELIVERY linkaddr_t dest_n = convert_eid_to_rime(entry->destination_node); if (linkaddr_cmp(&ticket->neighbour, &dest_n)) { LOG(LOGD_DTN, LOG_ROUTE, LOGL_DBG, "bundle sent to destination node"); uint32_t bundle_number = ticket->bundle_number; /* Free up the ticket */ convergence_layer_free_transmit_ticket(ticket); ticket = NULL; // Unset the forward flag entry->flags &= 
~ROUTING_FLAG_FORWARD; routing_chain_check_keep_bundle(bundle_number); return; } LOG(LOGD_DTN, LOG_ROUTE, LOGL_DBG, "bundle for %u.%u delivered to %u.%u", dest_n.u8[0], dest_n.u8[1], ticket->neighbour.u8[0], ticket->neighbour.u8[1]); #endif if (entry->send_to < ROUTING_NEI_MEM) { linkaddr_copy(&entry->neighbours[entry->send_to], &ticket->neighbour); entry->send_to++; LOG(LOGD_DTN, LOG_ROUTE, LOGL_DBG, "bundle %lu sent to %u nodes", ticket->bundle_number, entry->send_to); } else if (entry->send_to >= ROUTING_NEI_MEM) { // Here we can delete the bundle from storage, because it will not be routed anyway LOG(LOGD_DTN, LOG_ROUTE, LOGL_DBG, "bundle %lu sent to max number of nodes, deleting", ticket->bundle_number); // Unset the forward flag entry->flags &= ~ROUTING_FLAG_FORWARD; routing_chain_check_keep_bundle(ticket->bundle_number); } /* Free up the ticket */ convergence_layer_free_transmit_ticket(ticket); }
/**
 * \brief Adds a new bundle to the list of bundles
 * \param bundle_number bundle number of the bundle
 * \return >0 on success, <0 on error
 */
int routing_chain_new_bundle(uint32_t * bundle_number)
{
	struct routing_list_entry_t * n = NULL;
	struct routing_entry_t * entry = NULL;
	struct mmem * bundlemem = NULL;
	struct bundle_t * bundle = NULL;

	LOG(LOGD_DTN, LOG_ROUTE, LOGL_DBG, "agent announces bundle %lu", *bundle_number);

	// Let us see, if we know this bundle already
	for( n = list_head(routing_list); n != NULL; n = list_item_next(n) ) {
		entry = (struct routing_entry_t *) MMEM_PTR(&n->entry);
		if( entry->bundle_number == *bundle_number ) {
			LOG(LOGD_DTN, LOG_ROUTE, LOGL_ERR, "agent announces bundle %lu that is already known", *bundle_number);
			return -1;
		}
	}

	// Notify statistics
	statistics_bundle_incoming(1);

	// Now allocate new memory for the list entry
	n = memb_alloc(&routing_mem);
	if( n == NULL ) {
		LOG(LOGD_DTN, LOG_ROUTE, LOGL_ERR, "cannot allocate list entry for bundle, please increase BUNDLE_STORAGE_SIZE");
		return -1;
	}
	memset(n, 0, sizeof(struct routing_list_entry_t));

	// Now allocate new MMEM memory for the struct in the list
	if( !mmem_alloc(&n->entry, sizeof(struct routing_entry_t)) ) {
		LOG(LOGD_DTN, LOG_ROUTE, LOGL_ERR, "cannot allocate routing struct for bundle, MMEM is full");
		memb_free(&routing_mem, n);
		return -1;
	}

	// Now go and request the bundle from storage
	bundlemem = BUNDLE_STORAGE.read_bundle(*bundle_number);
	if( bundlemem == NULL ) {
		LOG(LOGD_DTN, LOG_ROUTE, LOGL_ERR, "unable to read bundle %lu", *bundle_number);
		mmem_free(&n->entry);
		memb_free(&routing_mem, n);
		return -1;
	}

	// Get our bundle struct and check the pointer
	bundle = (struct bundle_t *) MMEM_PTR(bundlemem);
	if( bundle == NULL ) {
		LOG(LOGD_DTN, LOG_ROUTE, LOGL_ERR, "invalid bundle pointer for bundle %lu", *bundle_number);
		mmem_free(&n->entry);
		memb_free(&routing_mem, n);
		bundle_decrement(bundlemem);
		return -1;
	}

	// Now we have our entry
	// We have to get the pointer AFTER getting the bundle from storage, because accessing the
	// storage may change the MMEM structure and thus the pointers!
	entry = (struct routing_entry_t *) MMEM_PTR(&n->entry);
	memset(entry, 0, sizeof(struct routing_entry_t));

	// Nothing can go wrong anymore, add the (surrounding) struct to the list
	list_add(routing_list, n);

	/* Here we decide if a bundle is to be delivered locally and/or forwarded */
	if( bundle->dst_node == dtn_node_id ) {
		/* This bundle is for our node_id, deliver locally */
		LOG(LOGD_DTN, LOG_ROUTE, LOGL_DBG, "bundle is for local");
		entry->flags |= ROUTING_FLAG_LOCAL;
	} else {
		/* This bundle is not (directly) for us and will be forwarded */
		LOG(LOGD_DTN, LOG_ROUTE, LOGL_DBG, "bundle is for forward");
		entry->flags |= ROUTING_FLAG_FORWARD;
	}

	if( !(bundle->flags & BUNDLE_FLAG_SINGLETON) ) {
		/* Bundle is not Singleton, so forward it in any case */
		LOG(LOGD_DTN, LOG_ROUTE, LOGL_DBG, "bundle is for forward");
		entry->flags |= ROUTING_FLAG_FORWARD;
	}

	if( registration_is_local(bundle->dst_srv, bundle->dst_node) && bundle->dst_node != dtn_node_id) {
		/* Bundle is for a local registration, so deliver it locally */
		LOG(LOGD_DTN, LOG_ROUTE, LOGL_DBG, "bundle is for local and forward");
		entry->flags |= ROUTING_FLAG_LOCAL;
		entry->flags |= ROUTING_FLAG_FORWARD;
	}

	// Now copy the necessary attributes from the bundle
	entry->bundle_number = *bundle_number;
	bundle_get_attr(bundlemem, DEST_NODE, &entry->destination_node);
	bundle_get_attr(bundlemem, SRC_NODE, &entry->source_node);
	linkaddr_copy(&entry->received_from_node, &bundle->msrc);

	// Now that we have the bundle, we do not need the allocated memory anymore
	bundle_decrement(bundlemem);
	bundlemem = NULL;
	bundle = NULL;

	// Schedule to deliver and forward the bundle
	routing_chain_schedule_resubmission();

	// We do not have a failure here, so it must be a success
	return 1;
}
int bundle_encode_bundle(struct mmem *bundlemem, uint8_t *buffer, int max_len) { uint8_t i; uint32_t value, offs = 0, blklen_offs; int ret, blklen_size; struct bundle_t *bundle = (struct bundle_t *) MMEM_PTR(bundlemem); struct bundle_block_t *block; /* Hardcode the version to 0x06 */ buffer[0] = 0x06; offs++; /* Flags */ ret = sdnv_encode(bundle->flags, &buffer[offs], max_len - offs); if (ret < 0) return -1; offs += ret; /* Block length will be calculated later * reserve one byte for now */ blklen_offs = offs; offs++; /* Destination node + SSP */ ret = sdnv_encode(bundle->dst_node, &buffer[offs], max_len - offs); if (ret < 0) return -1; offs += ret; ret = sdnv_encode_long(bundle->dst_srv, &buffer[offs], max_len - offs); if (ret < 0) return -1; offs += ret; /* Source node + SSP */ ret = sdnv_encode(bundle->src_node, &buffer[offs], max_len - offs); if (ret < 0) return -1; offs += ret; ret = sdnv_encode_long(bundle->src_srv, &buffer[offs], max_len - offs); if (ret < 0) return -1; offs += ret; /* Report-to node + SSP */ ret = sdnv_encode(bundle->rep_node, &buffer[offs], max_len - offs); if (ret < 0) return -1; offs += ret; ret = sdnv_encode(bundle->rep_srv, &buffer[offs], max_len - offs); if (ret < 0) return -1; offs += ret; /* Custodian node + SSP */ ret = sdnv_encode(bundle->cust_node, &buffer[offs], max_len - offs); if (ret < 0) return -1; offs += ret; ret = sdnv_encode(bundle->cust_srv, &buffer[offs], max_len - offs); if (ret < 0) return -1; offs += ret; /* Creation Timestamp */ ret = sdnv_encode_long(bundle->tstamp, &buffer[offs], max_len - offs); if (ret < 0) return -1; offs += ret; /* Creation Timestamp Sequence Number */ ret = sdnv_encode(bundle->tstamp_seq, &buffer[offs], max_len - offs); if (ret < 0) return -1; offs += ret; /* Lifetime */ ret = sdnv_encode(bundle->lifetime, &buffer[offs], max_len - offs); if (ret < 0) return -1; offs += ret; /* Directory Length */ ret = sdnv_encode(0l, &buffer[offs], max_len - offs); if (ret < 0) return -1; offs += ret; if 
(bundle->flags & BUNDLE_FLAG_FRAGMENT) { LOG(LOGD_DTN, LOG_BUNDLE, LOGL_INF, "Bundle is a fragment"); /* Fragment Offset */ ret = sdnv_encode(bundle->frag_offs, &buffer[offs], max_len - offs); if (ret < 0) return -1; offs += ret; /* Total App Data Unit Length */ ret = sdnv_encode(bundle->app_len, &buffer[offs], max_len - offs); if (ret < 0) return -1; offs += ret; } /* Calculate block length value */ value = offs - blklen_offs - 1; blklen_size = sdnv_encoding_len(value); /* Move the data around */ if (blklen_size > 1) { memmove(&buffer[blklen_offs+blklen_size], &buffer[blklen_offs+1], value); } ret = sdnv_encode(value, &buffer[blklen_offs], blklen_size); offs += ret-1; /* Encode Bundle Age Block - always as first block */ offs += bundle_ageing_encode_age_extension_block(bundlemem, &buffer[offs], max_len - offs); block = (struct bundle_block_t *) bundle->block_data; for (i=0;i<bundle->num_blocks;i++) { offs += bundle_encode_block(block, &buffer[offs], max_len - offs); /* Reference the next block */ block = (struct bundle_block_t *) &block->payload[block->block_size]; } return offs; }
/**
 * \brief Parses an incoming data frame, reassembling multi-segment bundles if needed
 * \param source Link-layer address of the sending peer
 * \param payload Frame payload bytes
 * \param payload_length Length of \a payload in bytes
 * \param flags Convergence-layer segment flags (FIRST/LAST)
 * \param sequence_number 2-bit segment sequence number (mod 4)
 * \param rssi RSSI attribute of the received packet
 *
 * Return values:
 *  1 = SUCCESS (bundle dispatched, or segment buffered awaiting more)
 * -1 = Temporary error
 * -2 = Permanent error
 */
int convergence_layer_parse_dataframe(linkaddr_t * source, uint8_t * payload, uint8_t payload_length, uint8_t flags, uint8_t sequence_number, packetbuf_attr_t rssi)
{
	struct mmem * bundlemem = NULL;
	struct bundle_t * bundle = NULL;
	struct transmit_ticket_t * ticket = NULL;
	int n;
	int length;
#if CONVERGENCE_LAYER_SEGMENTATION
	int ret;
#endif /* CONVERGENCE_LAYER_SEGMENTATION */

	/* Note down the payload length */
	length = payload_length;

	/* FIRST|LAST together means an unsegmented bundle; anything else is a segment */
	if( flags != (CONVERGENCE_LAYER_FLAGS_FIRST | CONVERGENCE_LAYER_FLAGS_LAST ) ) {
#if CONVERGENCE_LAYER_SEGMENTATION
		/* We have a multipart bundle here */
		if( flags == CONVERGENCE_LAYER_FLAGS_FIRST ) {
			/* Beginning of a new bundle from a peer, remove old tickets */
			for( ticket = list_head(transmission_ticket_list); ticket != NULL; ticket = list_item_next(ticket) ) {
				if( linkaddr_cmp(&ticket->neighbour, source) && (ticket->flags & CONVERGENCE_LAYER_QUEUE_MULTIPART_RECV) ) {
					break;
				}
			}

			/* We found a stale reassembly ticket for this peer, remove it */
			if( ticket != NULL ) {
				LOG(LOGD_DTN, LOG_CL, LOGL_WRN, "Resynced to peer %u.%u, throwing away old buffer", source->u8[0], source->u8[1]);
				convergence_layer_free_transmit_ticket(ticket);
				ticket = NULL;
			}

			/* Allocate a new ticket for the incoming bundle */
			ticket = convergence_layer_get_transmit_ticket_priority(CONVERGENCE_LAYER_PRIORITY_HIGH);

			if( ticket == NULL ) {
				LOG(LOGD_DTN, LOG_CL, LOGL_ERR, "Unable to allocate multipart receive ticket");
				return -1;
			}

			/* Fill the fields of the ticket */
			linkaddr_copy(&ticket->neighbour, source);
			ticket->flags = CONVERGENCE_LAYER_QUEUE_MULTIPART_RECV;
			ticket->timestamp = clock_time();
			ticket->sequence_number = sequence_number;

			/* Now allocate some memory for the reassembly buffer */
			ret = mmem_alloc(&ticket->buffer, length);

			if( ret < 1 ) {
				LOG(LOGD_DTN, LOG_CL, LOGL_ERR, "Unable to allocate multipart receive buffer of %u bytes", length);
				convergence_layer_free_transmit_ticket(ticket);
				ticket = NULL;
				return -1;
			}

			/* Copy the payload into the buffer */
			memcpy(MMEM_PTR(&ticket->buffer), payload, length);

			/* We are waiting for more segments, return now */
			return 1;
		} else {
			/* Either the middle or the end of a bundle, go look for the ticket */
			for( ticket = list_head(transmission_ticket_list); ticket != NULL; ticket = list_item_next(ticket) ) {
				if( linkaddr_cmp(&ticket->neighbour, source) && (ticket->flags & CONVERGENCE_LAYER_QUEUE_MULTIPART_RECV) ) {
					break;
				}
			}

			/* Cannot find a ticket, discard segment */
			if( ticket == NULL ) {
				LOG(LOGD_DTN, LOG_CL, LOGL_WRN, "Segment from peer %u.%u does not match any bundles in progress, discarding", source->u8[0], source->u8[1]);
				return -1;
			}

			/* Sequence numbers are modulo 4; anything but the successor of the
			 * last accepted segment is a duplicate or out-of-order frame */
			if( sequence_number != (ticket->sequence_number + 1) % 4 ) {
				LOG(LOGD_DTN, LOG_CL, LOGL_WRN, "Segment from peer %u.%u is out of sequence. Recv %u, Exp %u", source->u8[0], source->u8[1], sequence_number, (ticket->sequence_number + 1) % 4);
				return 1;
			}

			/* Store the last received and valid sequence number */
			ticket->sequence_number = sequence_number;

			/* Note down the old length to know where to start appending */
			n = ticket->buffer.size;

			/* Allocate more memory to fit the new segment */
			ret = mmem_realloc(&ticket->buffer, ticket->buffer.size + length);

			if( ret < 1 ) {
				LOG(LOGD_DTN, LOG_CL, LOGL_ERR, "Unable to re-allocate multipart receive buffer of %u bytes", ticket->buffer.size + length);
				convergence_layer_free_transmit_ticket(ticket);
				return -1;
			}

			/* Update timestamp to avoid the ticket from timing out */
			ticket->timestamp = clock_time();

			/* And append the payload */
			memcpy(((uint8_t *) MMEM_PTR(&ticket->buffer)) + n, payload, length);
		}

		if( flags & CONVERGENCE_LAYER_FLAGS_LAST ) {
			/* We have the last segment, change pointer so that the rest of the
			 * function parses the reassembled buffer instead of the frame payload */
			payload = (uint8_t *) MMEM_PTR(&ticket->buffer);
			length = ticket->buffer.size;

			LOG(LOGD_DTN, LOG_CL, LOGL_DBG, "%u byte multipart bundle received from %u.%u, parsing", length, source->u8[0], source->u8[1]);
		} else {
			/* We are waiting for more segments, return now */
			return 1;
		}
#else /* CONVERGENCE_LAYER_SEGMENTATION */
		/* We will never be able to parse that bundle, signal a permanent error */
		return -2;
#endif /* CONVERGENCE_LAYER_SEGMENTATION */
	}

	/* Allocate memory, parse the bundle and set reference counter to 1 */
	bundlemem = bundle_recover_bundle(payload, length);

	/* We do not need the ticket anymore if there was one, deallocate it */
	if( ticket != NULL ) {
		convergence_layer_free_transmit_ticket(ticket);
		ticket = NULL;
	}

	if( !bundlemem ) {
		LOG(LOGD_DTN, LOG_CL, LOGL_WRN, "Error recovering bundle");
		/* Possibly not enough memory -> temporary error */
		return -1;
	}

	bundle = (struct bundle_t *) MMEM_PTR(bundlemem);
	if( !bundle ) {
		LOG(LOGD_DTN, LOG_CL, LOGL_WRN, "Invalid bundle pointer");
		bundle_decrement(bundlemem);
		/* Possibly not enough memory -> temporary error */
		return -1;
	}

	/* Check for bundle expiration */
	if( bundle_ageing_is_expired(bundlemem) ) {
		LOG(LOGD_DTN, LOG_CL, LOGL_ERR, "Bundle received from %u.%u with SeqNo %u is expired", source->u8[0], source->u8[1], sequence_number);
		bundle_decrement(bundlemem);
		/* Send permanent rejection */
		return -2;
	}

	/* Mark the bundle as "internal" */
	bundle->source_process = &agent_process;

	LOG(LOGD_DTN, LOG_CL, LOGL_DBG, "Bundle from ipn:%lu.%lu (to ipn:%lu.%lu) received from %u.%u with SeqNo %u", bundle->src_node, bundle->src_srv, bundle->dst_node, bundle->dst_srv, source->u8[0], source->u8[1], sequence_number);

	/* Store the node from which we received the bundle */
	linkaddr_copy(&bundle->msrc, source);

	/* Store the RSSI for this packet */
	bundle->rssi = rssi;

	/* Hand over the bundle to dispatching (which takes ownership of the reference) */
	n = dispatching_dispatch_bundle(bundlemem);
	bundlemem = NULL;

	if( n ) {
		/* Dispatching was successfull! */
		return 1;
	}

	/* Temporary error */
	return -1;
}
uint8_t my_verify_bundle(uint32_t bundle_number, uint32_t sequence_number) { uint32_t attr = 0; uint32_t i; int errors = 0; struct bundle_block_t * block = NULL; struct mmem * ptr = NULL; ptr = BUNDLE_STORAGE.read_bundle(bundle_number); if( ptr == NULL ) { PRINTF("VERIFY: MMEM ptr is invalid\n"); return 0; } struct bundle_t * bundle = (struct bundle_t *) MMEM_PTR(ptr); if( bundle == NULL ) { PRINTF("VERIFY: bundle ptr is invalid\n"); return 0; } // Verify the attributes for(i=VERSION; i<=FRAG_OFFSET; i++) { bundle_get_attr(ptr, i, &attr); if( i == TIME_STAMP_SEQ_NR || i == LENGTH || i == DIRECTORY_LEN || i == LIFE_TIME ) { continue; } if( attr != i ) { PRINTF("VERIFY: attribute %lu mismatch\n", i); errors ++; } } // Verify the sequence number bundle_get_attr(ptr, TIME_STAMP_SEQ_NR, &attr); if( attr != sequence_number ) { PRINTF("VERIFY: sequence number mismatch\n"); errors ++; } block = bundle_get_payload_block(ptr); if( block == NULL ) { PRINTF("VERIFY: unable to get payload block\n"); errors ++; } else { // Fill the payload for(i=0; i<60; i++) { if( block->payload[i] != (i + (uint8_t) sequence_number) ) { PRINTF("VERIFY: payload byte %lu mismatch\n", i); errors ++; } } } bundle_decrement(ptr); if( errors ) { return 0; } return 1; }
PROCESS_THREAD(test_process, ev, data) { static int n; static int i; static int errors = 0; static struct etimer timer; static uint32_t time_start, time_stop; uint8_t buffer[128]; int bundle_length; struct mmem * bundle_original = NULL; struct mmem * bundle_restored = NULL; struct mmem * bundle_spare = NULL; uint32_t bundle_number; uint32_t bundle_number_spare; PROCESS_BEGIN(); PROCESS_PAUSE(); profiling_init(); profiling_start(); // Wait again etimer_set(&timer, CLOCK_SECOND); PROCESS_WAIT_UNTIL(etimer_expired(&timer)); /* Profile initialization separately */ profiling_stop(); watchdog_stop(); profiling_report("init", 0); watchdog_start(); printf("Init done, starting test using %s storage\n", BUNDLE_STORAGE.name); profiling_init(); profiling_start(); // Measure the current time time_start = test_precise_timestamp(); for(i=0; i<=1; i++) { struct mmem bla; if( i > 0 ) { mmem_alloc(&bla, 1); } printf("Serializing and deserializing bundle...\n"); if( my_create_bundle(0, &bundle_number, 3600) ) { printf("\tBundle created successfully \n"); } else { printf("\tBundle could not be created \n"); errors ++; } printf("Serializing and deserializing bundle...\n"); if( my_create_bundle(1, &bundle_number_spare, 3600) ) { printf("\tSpare Bundle created successfully \n"); } else { printf("\tSpare Bundle could not be created \n"); errors ++; } bundle_original = BUNDLE_STORAGE.read_bundle(bundle_number); if( bundle_original == NULL ) { printf("VERIFY: MMEM ptr is invalid\n"); errors ++; } bundle_spare = BUNDLE_STORAGE.read_bundle(bundle_number_spare); if( bundle_spare == NULL ) { printf("VERIFY: MMEM ptr is invalid\n"); errors ++; } // Fake timing information in the bundle to make verify successful struct bundle_t * bundle_original_bundle = (struct bundle_t *) MMEM_PTR(bundle_original); bundle_original_bundle->aeb_value_ms = 54; bundle_original_bundle->rec_time = clock_time(); // Serialize the bundle memset(buffer, 0, 128); bundle_length = bundle_encode_bundle(bundle_original, 
buffer, 128); if( bundle_length < 0 ) { printf("SERIALIZE: fail\n"); errors ++; } n = my_static_compare(buffer, bundle_length); if( n > 0 ) { printf("COMPARE: fail\n"); errors += n; } // Deserialize it bundle_restored = bundle_recover_bundle(buffer, bundle_length); if( bundle_restored == NULL ) { printf("DESERIALIZE: unable to recover\n"); errors ++; } n = my_compare_bundles(bundle_original, bundle_restored); if( n == 0 ) { printf("\tBundle serialized and deserialized successfully\n"); } else { printf("COMPARE: differences\n"); errors ++; } // Dellocate memory bundle_decrement(bundle_restored); bundle_restored = NULL; bundle_decrement(bundle_original); bundle_original = NULL; bundle_decrement(bundle_spare); bundle_spare = NULL; memset(buffer, 0, 128); // Delete bundle from storage n = BUNDLE_STORAGE.del_bundle(bundle_number, REASON_DELIVERED); if( n ) { printf("\tBundle deleted successfully\n"); } else { printf("\tBundle could not be deleted\n"); errors++; } printf("Comparing static bundle...\n"); if( my_create_bundle(0, &bundle_number, 3600) ) { printf("\tReference Bundle created successfully \n"); } else { printf("\ttReference Bundle could not be created \n"); errors ++; } bundle_original = BUNDLE_STORAGE.read_bundle(bundle_number); if( bundle_original == NULL ) { printf("VERIFY: MMEM ptr is invalid\n"); errors ++; } // Deserialize it bundle_restored = bundle_recover_bundle(static_compare_bundle, sizeof(static_compare_bundle)); if( bundle_restored == NULL ) { printf("DESERIALIZE: unable to recover static bundle\n"); errors ++; } // Deserialize it one more time bundle_spare = bundle_recover_bundle(static_compare_bundle, sizeof(static_compare_bundle)); if( bundle_spare == NULL ) { printf("DESERIALIZE: unable to recover static bundle\n"); errors ++; } n = my_compare_bundles(bundle_original, bundle_restored); if( n == 0 ) { printf("\tStatic Bundle verified successfully\n"); } else { printf("COMPARE: differences\n"); errors ++; } n = 
my_compare_bundles(bundle_original, bundle_spare); if( n == 0 ) { printf("\tStatic Bundle verified successfully\n"); } else { printf("COMPARE: differences\n"); errors ++; } // Dellocate memory bundle_decrement(bundle_restored); bundle_restored = NULL; bundle_decrement(bundle_original); bundle_original = NULL; bundle_decrement(bundle_spare); bundle_spare = NULL; } time_stop = test_precise_timestamp(); watchdog_stop(); profiling_report("serializer", 0); TEST_REPORT("No of errors", errors, 1, "errors"); TEST_REPORT("Duration", time_stop-time_start, CLOCK_SECOND, "s"); if( errors > 0 ) { TEST_FAIL("More than 0 errors occured"); } else { TEST_PASS(); } PROCESS_END(); }
/**
 * \brief Processes an incoming ACK/NACK frame for a previously sent bundle
 * \param source Link-layer address of the acknowledging peer
 * \param payload Frame payload (unused here)
 * \param length Payload length (unused here)
 * \param sequence_number Sequence number being acknowledged
 * \param type Frame type (CONVERGENCE_LAYER_TYPE_ACK or _NACK)
 * \param flags Frame flags; for NACKs, FIRST marks a temporary NACK
 * \return 1 on success, -1 if no matching ACK-pending ticket was found
 */
int convergence_layer_parse_ackframe(linkaddr_t * source, uint8_t * payload, uint8_t length, uint8_t sequence_number, uint8_t type, uint8_t flags)
{
	struct transmit_ticket_t * ticket = NULL;
	struct bundle_t * bundle = NULL;

	/* This neighbour is now unblocked */
	convergence_layer_set_unblocked(source);

	if( convergence_layer_pending == 0 ) {
		/* Poll the process to initiate transmission of the next bundle */
		process_poll(&convergence_layer_process);
	}

	LOG(LOGD_DTN, LOG_CL, LOGL_DBG, "Incoming ACK from %u.%u for SeqNo %u", source->u8[0], source->u8[1], sequence_number);

	/* Find the ACK-pending ticket that belongs to this neighbour */
	for(ticket = list_head(transmission_ticket_list); ticket != NULL; ticket = list_item_next(ticket) ) {
		if( linkaddr_cmp(source, &ticket->neighbour) && (ticket->flags & CONVERGENCE_LAYER_QUEUE_ACK_PEND) ) {
			break;
		}
	}

	/* Unable to find that bundle */
	if( ticket == NULL ) {
		return -1;
	}

	/* Does the originator need forward notification? */
	if( type == CONVERGENCE_LAYER_TYPE_ACK && ticket->bundle != NULL ) {
		bundle = (struct bundle_t *) MMEM_PTR(ticket->bundle);

		/* Is the forward report flag set? */
		if( bundle->flags & BUNDLE_FLAG_REP_FWD ) {
			STATUSREPORT.send(ticket->bundle, NODE_FORWARDED_BUNDLE, NO_ADDITIONAL_INFORMATION);
		}
	}

	/* TODO: Handle temporary NACKs separately here */
	if( type == CONVERGENCE_LAYER_TYPE_ACK ) {
#if CONVERGENCE_LAYER_SEGMENTATION
		if( ticket->flags & CONVERGENCE_LAYER_QUEUE_MULTIPART ) {
			/* Sequence numbers run modulo 4; only the successor of the last
			 * sent sequence number acknowledges the outstanding segment */
			if( sequence_number == (ticket->sequence_number + 1) % 4 ) {
				// ACK received - advance the acked offset to what was sent
				ticket->offset_acked = ticket->offset_sent;

				if( ticket->offset_acked == ticket->buffer.size ) {
					/* Last segment, we are done */
					LOG(LOGD_DTN, LOG_CL, LOGL_DBG, "Last Segment of bundle %lu acked, done", ticket->bundle_number);
				} else {
					/* There are more segments, keep on sending */
					LOG(LOGD_DTN, LOG_CL, LOGL_DBG, "One Segment of bundle %lu acked, more to come", ticket->bundle_number);
					return 1;
				}
			} else {
				/* Duplicate or out of sequence ACK, ignore it */
				LOG(LOGD_DTN, LOG_CL, LOGL_DBG, "Duplicate ACK for bundle %lu received", ticket->bundle_number);
				return 1;
			}
		}
#endif /* CONVERGENCE_LAYER_SEGMENTATION */

		/* Bundle has been ACKed and is now done */
		ticket->flags = CONVERGENCE_LAYER_QUEUE_DONE;

		/* Notify routing module */
		ROUTING.sent(ticket, ROUTING_STATUS_OK);
	} else if( type == CONVERGENCE_LAYER_TYPE_NACK ) {
		/* Bundle has been NACKed and is now done */
		ticket->flags = CONVERGENCE_LAYER_QUEUE_FAIL;

		/* Notify routing module */
		if( flags & CONVERGENCE_LAYER_FLAGS_FIRST ) {
			/* Temporary NACK */
			ROUTING.sent(ticket, ROUTING_STATUS_TEMP_NACK);
		} else {
			/* Permanent NACK */
			ROUTING.sent(ticket, ROUTING_STATUS_NACK);
		}
	}

	/* We can free the bundle memory */
	if( ticket->bundle != NULL ) {
		bundle_decrement(ticket->bundle);
		ticket->bundle = NULL;
	}

	return 1;
}
/** * \brief iterate through all bundles and forward bundles */ void routing_chain_send_to_known_neighbours(void) { struct routing_list_entry_t * n = NULL; struct routing_entry_t * entry = NULL; int try_to_forward = 1; int try_local = 1; int h = 0; LOG(LOGD_DTN, LOG_ROUTE, LOGL_DBG, "send to known neighbours"); /** * It is likely, that we will have less neighbours than bundles - therefore, we want to to go through bundles only once */ for( n = (struct routing_list_entry_t *) list_head(routing_list); n != NULL; n = list_item_next(n) ) { entry = (struct routing_entry_t *) MMEM_PTR(&n->entry); if( entry == NULL ) { LOG(LOGD_DTN, LOG_ROUTE, LOGL_WRN, "Bundle with invalid MMEM structure"); } if( try_local ) { /* Is the bundle for local? */ h = routing_chain_send_to_local(entry); /* We can only deliver only bundle at a time to local processes to speed up the whole thing */ if( h == CHAIN_ROUTE_RETURN_OK ) { try_local = 0; } } /* Skip this bundle, if it is not queued for forwarding */ if( !(entry->flags & ROUTING_FLAG_FORWARD) || (entry->flags & ROUTING_FLAG_IN_TRANSIT) || !try_to_forward ) { continue; } /* Try to forward it to the destination, if it is our neighbour */ h = routing_chain_forward_directly(entry); if( h == CHAIN_ROUTE_RETURN_OK ) { /* Bundle will be delivered, to skip the remainder if this function*/ continue; } else if( h == CHAIN_ROUTE_RETURN_CONTINUE ) { /* Bundle was not delivered, continue as normal */ } else if( h == CHAIN_ROUTE_RETURN_FAIL ) { /* Enqueuing the bundle failed, to stop the forwarding process */ try_to_forward = 0; continue; } /* At this point, we know that the bundle is not for one of our neighbours, so send it to all the others */ h = routing_chain_forward_normal(entry); if( h == CHAIN_ROUTE_RETURN_OK ) { /* Bundle will be forwarded, continue as normal */ } else if( h == CHAIN_ROUTE_RETURN_CONTINUE ) { /* Bundle will not be forwarded, continue as normal */ } else if( h == CHAIN_ROUTE_RETURN_FAIL ) { /* Enqueuing the bundle failed, to 
stop the forwarding process */ try_to_forward = 0; continue; } } }