Example no. 1
0
void gsm_call_answer()
{
    ElemType elem;
    char buf[30];
    char tmp[30];
    bool ring_flag = false;
    bool ans_flag = false;
    unsigned int ret = 0;
    
            strcpy(buf, "ATA\r\n");
            gsm_send(buf, NULL);
            ring_flag = false;
            ans_flag = true;
#if 1
        if(cb_isempty(cb) == 0 && ans_flag){
            memset(&elem, 0, sizeof(elem));
            cb_read(cb, &elem);
            memset(tmp, 0, sizeof(tmp));
            memcpy(tmp, elem.data, elem.len);
            tmp[elem.len + 1] = '\0';
            if(strcmp(tmp, "OK") == 0)
                call_init_state = CNT_STATE;
        }
#endif
#if 0 
    memset(tmp, 0, sizeof(tmp));
    memcpy(tmp, elem.data, elem.len);
    tmp[elem.len + 1] = '\n';

    if(strcmp(tmp, "OK") == 0){
        call_init_state = END_STATE;
        return;
    }
#endif

}
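For context, here is a small, self-contained sketch of the circular-buffer element API that gsm_call_answer() above appears to assume. The names ElemType, cb_isempty and cb_read come from the calls in the snippet; the struct layout, the capacity and the cb_write helper are assumptions made for illustration only, not the project's real implementation.

/* Hypothetical sketch of the buffer interface used above; layout and
 * semantics are inferred from the calls, not taken from the real project. */
#include <stdio.h>
#include <string.h>

#define CB_CAPACITY 8

typedef struct {
    unsigned char data[30];  /* raw bytes of one modem response line */
    unsigned int  len;       /* number of valid bytes in data */
} ElemType;

typedef struct {
    ElemType items[CB_CAPACITY];
    unsigned int head, tail, count;
} circ_buf;

/* returns non-zero when empty, 0 when at least one element is queued,
 * matching the `cb_isempty(cb) == 0` test in the snippet */
static int cb_isempty(const circ_buf *b) { return b->count == 0; }

static void cb_write(circ_buf *b, const ElemType *e) {
    if (b->count == CB_CAPACITY) return;      /* drop when full (simplification) */
    b->items[b->tail] = *e;
    b->tail = (b->tail + 1) % CB_CAPACITY;
    b->count++;
}

static void cb_read(circ_buf *b, ElemType *out) {
    if (b->count == 0) return;
    *out = b->items[b->head];
    b->head = (b->head + 1) % CB_CAPACITY;
    b->count--;
}

int main(void) {
    circ_buf cb = {0};
    ElemType e = {0};
    memcpy(e.data, "OK", 2);
    e.len = 2;
    cb_write(&cb, &e);                       /* pretend the RX path queued "OK" */

    if (cb_isempty(&cb) == 0) {
        char tmp[30];
        ElemType elem;
        cb_read(&cb, &elem);
        memcpy(tmp, elem.data, elem.len);
        tmp[elem.len] = '\0';                /* terminate right after the copied bytes */
        printf("modem replied: %s\n", tmp);
    }
    return 0;
}

In the real firmware the buffer is presumably filled from the modem's receive path; cb_write here merely stands in for that producer.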
Example no. 2
0
static void *pkt_processing_loop(void *arg) {
  while(1) {
    queuing_pkt_t *q_pkt = (queuing_pkt_t *) cb_read(rmt_queuing_cb);
    buffered_pkt_t *b_pkt = &q_pkt->pkt;
    RMT_LOG(P4_LOG_LEVEL_TRACE, "queuing system: packet dequeued\n");
    memset(metadata, 0, ${bytes});
    metadata_extract(metadata, q_pkt->metadata, q_pkt->metadata_recirc);

    /* after this point, no one should use q_pkt->metadata */

    int mirror_id = metadata_get_clone_spec(metadata);
    metadata_set_clone_spec(metadata, 0);
    int egress_port;
//:: if enable_pre:
    if (pre_replication(q_pkt, metadata)) {
      continue;
    }
//:: #endif
    if(mirror_id == 0) {
      egress_port = metadata_get_egress_spec(metadata);
      /* TODO: formalize somewhere that 511 is the drop port for this target */
//:: if "ingress_drop_ctl" in extra_metadata_name_map:
      // program uses the separate ingress_drop_ctl register
      // a non-zero value means drop
      if(egress_port == 511 || metadata_get_ingress_drop_ctl(metadata)) {
//:: else:
      if(egress_port == 511) {
//:: #endif
	RMT_LOG(P4_LOG_LEVEL_VERBOSE, "dropping packet at ingress\n");
	free(b_pkt->pkt_data);
	free(q_pkt->metadata);
	free(q_pkt->metadata_recirc);
	free(q_pkt);
	continue;
      }
    }
    else {
      RMT_LOG(P4_LOG_LEVEL_VERBOSE, "mirror id is %d\n", mirror_id);
      egress_port = ${pd_prefix}mirroring_mapping_get_egress_port(mirror_id);
      if (egress_port < 0) {
	RMT_LOG(P4_LOG_LEVEL_WARN,
		"no mapping for mirror id %d, dropping packet\n", mirror_id);
	free(b_pkt->pkt_data);
	free(q_pkt->metadata);
	free(q_pkt->metadata_recirc);
	free(q_pkt);
	continue;
      }
      RMT_LOG(P4_LOG_LEVEL_TRACE,
             "queuing system: cloned packet, mirror_id -> egress_port : %d -> %d\n",
             mirror_id, egress_port);
    }
    metadata_set_egress_port(metadata, egress_port);
    RMT_LOG(P4_LOG_LEVEL_TRACE, "egress port set to %d\n", egress_port);

    pkt_instance_type_t instance_type = b_pkt->instance_type;
    metadata_set_instance_type(metadata, instance_type);
    RMT_LOG(P4_LOG_LEVEL_TRACE, "instance type set to %d\n", instance_type);

//::  if enable_intrinsic:
    /* Set enqueue metadata */
    metadata_set_enq_qdepth(metadata, egress_pipeline_count(egress_port));
    metadata_set_enq_timestamp(metadata, get_timestamp());
//::  #endif 
    
    metadata_dump(q_pkt->metadata, metadata);
    egress_pipeline_receive(egress_port,
			    q_pkt->metadata, q_pkt->metadata_recirc,
			    b_pkt->pkt_data, b_pkt->pkt_len, b_pkt->pkt_id,
			    b_pkt->instance_type);
    free(q_pkt);
  }

  return NULL;
}
Example no. 3
CBAPI int CBCALL cb_read_single(struct circular_buffer *buffer, char *target)
{
	return cb_read(buffer, target, 1);
}
Example no. 4
0
/**
 * Interface (extern): Computes the k nearest neighbors for a given set of test points
 * stored in *Xtest and stores the results in two arrays *distances and *indices.
 *
 * @param *Xtest Pointer to the set of query/test points (stored as FLOAT_TYPE)
 * @param nXtest The number of query points
 * @param dXtest The dimension of each query point
 * @param *distances The distances array (FLOAT_TYPE) used to store the computed distances
 * @param ndistances The number of query points
 * @param ddistances The number of distance values for each query point
 * @param *indices Pointer to array storing the indices of the k nearest neighbors for each query point
 * @param nindices The number of query points
 * @param dindices The number of indices computed for each query point
 * @param *tree_record Pointer to struct storing all relevant information for model
 * @param *params Pointer to struct containing all relevant parameters
 *
 */
void neighbors_extern(FLOAT_TYPE * Xtest,
		INT_TYPE nXtest,
		INT_TYPE dXtest,
		FLOAT_TYPE *distances,
		INT_TYPE ndistances,
		INT_TYPE ddistances,
		INT_TYPE *indices,
		INT_TYPE nindices,
		INT_TYPE dindices,
		TREE_RECORD *tree_record,
		TREE_PARAMETERS *params) {

	START_MY_TIMER(tree_record->timers + 1);

	UINT_TYPE i, j;
	tree_record->find_leaf_idx_calls = 0;
	tree_record->empty_all_buffers_calls = 0;
	tree_record->Xtest = Xtest;
	tree_record->nXtest = nXtest;
	tree_record->dist_mins_global = distances;
	tree_record->idx_mins_global = indices;

	long device_mem_bytes = tree_record->device_infos.device_mem_bytes;
	double test_mem_bytes = get_test_tmp_mem_device_bytes(tree_record, params);
	PRINT(params)("Memory needed for test patterns: %f (GB)\n", test_mem_bytes / MEM_GB);
	if (test_mem_bytes > device_mem_bytes * params->allowed_test_mem_percent) {
		PRINT(params)("Too much memory used for test patterns and temporary data!\n");
		FREE_OPENCL_DEVICES(tree_record, params);
		exit(EXIT_FAILURE);
	}

	double total_device_bytes = get_total_mem_device_bytes(tree_record, params);
	PRINT(params)("Total memory needed on device: %f (GB)\n", total_device_bytes / MEM_GB);

	START_MY_TIMER(tree_record->timers + 4);

	/* ------------------------------------- OPENCL -------------------------------------- */
	INIT_ARRAYS(tree_record, params);
	/* ------------------------------------- OPENCL -------------------------------------- */

	// initialize leaf buffer for test queries (circular buffers)
	tree_record->buffers = (circular_buffer **) malloc(tree_record->n_leaves * sizeof(circular_buffer*));
	for (i = 0; i < tree_record->n_leaves; i++) {
		tree_record->buffers[i] = (circular_buffer *) malloc(sizeof(circular_buffer));
		cb_init(tree_record->buffers[i], tree_record->leaves_initial_buffer_sizes);
	}

	tree_record->buffer_full_warning = 0;

	// initialize queue "input" (we can have at most number_test_patterns in there)
	cb_init(&(tree_record->queue_reinsert), tree_record->nXtest);

	/* ------------------------------------- OPENCL -------------------------------------- */
	START_MY_TIMER(tree_record->timers + 3);
	ALLOCATE_MEMORY_OPENCL_DEVICES(tree_record, params);
	STOP_MY_TIMER(tree_record->timers + 3);
	/* ------------------------------------- OPENCL -------------------------------------- */

	UINT_TYPE iter = 0;
	UINT_TYPE test_printed = 0;

	// allocate space for the indices added in each round; we cannot have more than original test patterns ...
	INT_TYPE *all_next_indices = (INT_TYPE *) malloc(
			tree_record->approx_number_of_avail_buffer_slots * sizeof(INT_TYPE));

	// allocate space for all return values (by FIND_LEAF_IDX_BATCH)
	tree_record->leaf_indices_batch_ret_vals = (INT_TYPE *) malloc(
			tree_record->approx_number_of_avail_buffer_slots * sizeof(INT_TYPE));

	UINT_TYPE num_elts_added;
	tree_record->current_test_index = 0;
	INT_TYPE reinsert_counter = 0;

	PRINT(params)("Starting Querying process via buffer tree...\n");

	STOP_MY_TIMER(tree_record->timers + 4);
	START_MY_TIMER(tree_record->timers + 2);

	do {

		iter++;

		// try to get elements from both queues until buffers are full
		// (each buffer is either empty or has at least space for leaves_buffer_sizes_threshold elements)
		num_elts_added = 0;

		// add enough elements to the buffers ("batch filling")
		while (num_elts_added < tree_record->approx_number_of_avail_buffer_slots
				&& (tree_record->current_test_index < tree_record->nXtest
						|| !cb_is_empty(&(tree_record->queue_reinsert)))) {

			// we remove indices from both queues here (add one element from each queue, if not empty)
			if (!cb_is_empty(&(tree_record->queue_reinsert))) {
				cb_read(&(tree_record->queue_reinsert), all_next_indices + num_elts_added);
			} else {
				all_next_indices[num_elts_added] = tree_record->current_test_index;
				tree_record->current_test_index++;
			}
			num_elts_added++;
		}

		/* ------------------------------------- OPENCL -------------------------------------- */
		FIND_LEAF_IDX_BATCH(all_next_indices, num_elts_added, tree_record->leaf_indices_batch_ret_vals, tree_record,
				params);
		/* ------------------------------------- OPENCL -------------------------------------- */

		// we have added num_elts_added indices to the all_next_indices array
		for (j = 0; j < num_elts_added; j++) {

			INT_TYPE leaf_idx = tree_record->leaf_indices_batch_ret_vals[j];

			// if not done: add the index to the appropriate buffer
			if (leaf_idx != -1) {

				// enlarge buffer if needed
				if (cb_is_full(tree_record->buffers[leaf_idx])) {
					PRINT(params)("Increasing buffer size ...\n");
					tree_record->buffers[leaf_idx] = cb_double_size(tree_record->buffers[leaf_idx]);
				}

				// add next_indices[j] to buffer leaf_idx
				cb_write(tree_record->buffers[leaf_idx], all_next_indices + j);

				if (cb_get_number_items(tree_record->buffers[leaf_idx]) >= tree_record->leaves_buffer_sizes_threshold) {
					tree_record->buffer_full_warning = 1;
				}

			} // else: traversal of test pattern has reached root: done!
		}

		/* ------------------------------------- OPENCL -------------------------------------- */
		PROCESS_ALL_BUFFERS(tree_record, params);
		/* ------------------------------------- OPENCL -------------------------------------- */

		if (tree_record->current_test_index == tree_record->nXtest && !test_printed) {
			PRINT(params)("All query indices are in the buffer tree now (buffers or reinsert queue)...\n");
			test_printed = 1;
		}

	} while (tree_record->current_test_index < tree_record->nXtest || !cb_is_empty(&(tree_record->queue_reinsert)));

	STOP_MY_TIMER(tree_record->timers + 2);

	START_MY_TIMER(tree_record->timers + 5);
	/* ------------------------------------- OPENCL -------------------------------------- */
	GET_DISTANCES_AND_INDICES(tree_record, params);
	/* ------------------------------------- OPENCL -------------------------------------- */

	// free space generated by testing
	for (i = 0; i < tree_record->n_leaves; i++) {
		cb_free(tree_record->buffers[i]);
	}
	STOP_MY_TIMER(tree_record->timers + 5);
	STOP_MY_TIMER(tree_record->timers + 1);

	PRINT(params)("Buffer full indices (overhead)=%i\n", reinsert_counter);
	PRINT(params)("\nNumber of iterations in while loop: \t\t\t\t\t\t\t%i\n", iter);
	PRINT(params)("Number of empty_all_buffers calls: \t\t\t\t\t\t\t%i\n", tree_record->empty_all_buffers_calls);
	PRINT(params)("Number of find_leaf_idx_calls: \t\t\t\t\t\t\t\t%i\n\n", tree_record->find_leaf_idx_calls);

	PRINT(params)("Elapsed total time for querying: \t\t\t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 1));
	PRINT(params)("-----------------------------------------------------------------------------------------------------------------------------\n");
	PRINT(params)("(Overhead)  Elapsed time for BEFORE WHILE: \t\t\t\t\t%2.10f\n",
			GET_MY_TIMER(tree_record->timers + 4));
	PRINT(params)("(Overhead)  -> ALLOCATE_MEMORY_OPENCL_DEVICES: \t\t\t\t\t%2.10f\n",
			GET_MY_TIMER(tree_record->timers + 3));

	PRINT(params)(
			"-----------------------------------------------------------------------------------------------------------------------------\n");
	PRINT(params)("Elapsed time in while-loop: \t\t\t\t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 2));
	PRINT(params)("(I)    Elapsed time for PROCESS_ALL_BUFFERS: \t\t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 12));
	PRINT(params)("(I.A)  Function: retrieve_indices_from_buffers_gpu: \t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 11));
	PRINT(params)("(I.B)  Do brute-force (do_brute.../process_buffers_...chunks_gpu : \t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 18));
	PRINT(params)("(I.B.1) -> Elapsed time for clEnqueueWriteBuffer (INTERLEAVED): \t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 19));
	PRINT(params)("(I.B.1) -> Elapsed time for memcpy (INTERLEAVED): \t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 21));
	PRINT(params)("(I.B.1) -> Elapsed time for waiting for chunk (in seconds): \t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 22));
	PRINT(params)("(I.B.2) -> Number of copy calls: %i\n", tree_record->counters[0]);

	if (!training_chunks_inactive(tree_record, params)) {
		PRINT(params)("(I.B.4) -> Overhead distributing indices to chunks (in seconds): \t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 23));
		PRINT(params)("(I.B.5) -> Processing of whole chunk (all three phases, in seconds): \t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 24));
		PRINT(params)("(I.B.6) -> Processing of chunk before brute (in seconds): \t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 25));
		PRINT(params)("(I.B.7) -> Processing of chunk after brute (in seconds): \t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 26));
		PRINT(params)("(I.B.8) -> Processing of chunk after brute, buffer release (in seconds): \t%2.10f\n", GET_MY_TIMER(tree_record->timers + 27));
		PRINT(params)("(I.B.9) -> Number of release buffer calls: %i\n", tree_record->counters[0]);
	}
	if (USE_GPU) {

		PRINT(params)("(I.B.3)   -> Elapsed time for TEST_SUBSET (in seconds): \t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 13));
		PRINT(params)("(I.B.4)   -> Elapsed time for NN Search (in seconds): \t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 14));
		PRINT(params)("(I.B.5)   -> Elapsed time for UPDATE (in seconds): \t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 15));
		PRINT(params)("(I.B.6)   -> Elapsed time for OVERHEAD (in seconds): \t\t\t\t%2.10f\n",
				GET_MY_TIMER(tree_record->timers + 12)
				- GET_MY_TIMER(tree_record->timers + 14)
		    	- GET_MY_TIMER(tree_record->timers + 15)
				- GET_MY_TIMER(tree_record->timers + 13));

	}

	PRINT(params)("(II)   FIND_LEAF_IDX_BATCH : \t\t\t\t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 16));
	PRINT(params)("(III) Elapsed time for final brute-force step : \t\t\t\t%2.10f\n\n",
			GET_MY_TIMER(tree_record->timers + 20));

	PRINT(params)("-----------------------------------------------------------------------------------------------------------------------------\n");
	PRINT(params)("(DIFF) While - PROCESS_ALL_BUFFERS - FIND_LEAF_IDX_BATCH: \t\t\t%2.10f\n",
			GET_MY_TIMER(tree_record->timers + 2) - GET_MY_TIMER(tree_record->timers + 12)
					- GET_MY_TIMER(tree_record->timers + 16));
	PRINT(params)("(Overhead)  Elapsed time for AFTER WHILE : \t\t\t\t\t%2.10f\n",
			GET_MY_TIMER(tree_record->timers + 5));
	PRINT(params)("-----------------------------------------------------------------------------------------------------------------------------\n\n");

	PRINT(params)("-----------------------------------------------------------------------------------------------------------------------------\n");
	PRINT(params)("QUERY RUNTIME: %2.10f ", GET_MY_TIMER(tree_record->timers + 1));
	PRINT(params)("PROCESS_ALL_BUFFERS: %2.10f ", GET_MY_TIMER(tree_record->timers + 12));
	PRINT(params)("FIND_LEAF_IDX_BATCH: %2.10f ", GET_MY_TIMER(tree_record->timers + 16));
	PRINT(params)("WHILE_OVERHEAD: %2.10f ",
			GET_MY_TIMER(tree_record->timers + 2) - GET_MY_TIMER(tree_record->timers + 12)
					- GET_MY_TIMER(tree_record->timers + 16));
	PRINT(params)("\n");
	PRINT(params)("-----------------------------------------------------------------------------------------------------------------------------\n");

	// free all allocated memory related to querying
	for (i = 0; i < tree_record->n_leaves; i++) {
		free(tree_record->buffers[i]);
	}
	free(tree_record->buffers);

	// free arrays
	free(tree_record->all_stacks);
	free(tree_record->all_depths);
	free(tree_record->all_idxs);
	free(all_next_indices);
	free(tree_record->leaf_indices_batch_ret_vals);

}
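The query loop above keeps two sources of work: indices reinserted via the queue_reinsert circular buffer are drained first, and only then fresh test indices are taken, until the batch is full. Below is a small, self-contained illustration of that batch-filling pattern; the int_queue type and the fill_batch helper are simplified stand-ins written for this sketch, not bufferkdtree's actual structures.

/* Simplified, hypothetical illustration of the batch-filling step above:
 * take indices from the reinsert queue first, otherwise consume the next
 * fresh test index, until the batch is full or no work is left. */
#include <stdio.h>

#define QUEUE_CAP 16

typedef struct {
    int items[QUEUE_CAP];
    int head, tail, count;
} int_queue;   /* stand-in for the reinsert circular buffer */

static int q_is_empty(const int_queue *q) { return q->count == 0; }

static void q_write(int_queue *q, int v) {
    if (q->count == QUEUE_CAP) return;       /* ignore overflow in this sketch */
    q->items[q->tail] = v;
    q->tail = (q->tail + 1) % QUEUE_CAP;
    q->count++;
}

static int q_read(int_queue *q) {
    int v = q->items[q->head];
    q->head = (q->head + 1) % QUEUE_CAP;
    q->count--;
    return v;
}

/* Fills batch[] with at most batch_cap indices: reinserted ones first,
 * then fresh indices from *next_fresh up to n_test. Returns the count. */
static int fill_batch(int *batch, int batch_cap,
                      int_queue *reinsert, int *next_fresh, int n_test) {
    int added = 0;
    while (added < batch_cap && (*next_fresh < n_test || !q_is_empty(reinsert))) {
        if (!q_is_empty(reinsert))
            batch[added] = q_read(reinsert);
        else
            batch[added] = (*next_fresh)++;
        added++;
    }
    return added;
}

int main(void) {
    int_queue reinsert = {0};
    int batch[8];
    int next_fresh = 0, n_test = 10;

    q_write(&reinsert, 3);                   /* pretend index 3 came back for another pass */
    int n = fill_batch(batch, 8, &reinsert, &next_fresh, n_test);
    for (int i = 0; i < n; i++)
        printf("%d ", batch[i]);             /* prints: 3 0 1 2 3 4 5 6 */
    printf("\n");
    return 0;
}

The real implementation additionally routes each dequeued index into a per-leaf buffer and doubles a buffer's size when it fills up; this sketch only covers the dequeue/refill logic.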
Example no. 5
0
static void *processing_loop_egress(void *arg) {
  pipeline_t *pipeline = (pipeline_t *) arg;
  circular_buffer_t *cb_in = pipeline->cb_in;

  //Added by Ming
#ifdef SWITCH_CPU_DEBUG
  int i;
#endif

#ifdef RATE_LIMITING
  struct timeval tv;
  gettimeofday(&tv, NULL);
  uint64_t next_deque = tv.tv_sec * 1000000 + tv.tv_usec + read_atomic_int(&USEC_INTERVAL);
#endif

  while(1) {
#ifdef RATE_LIMITING
    struct timeval tv;
    gettimeofday(&tv, NULL);
    uint64_t now_us = tv.tv_sec * 1000000 + tv.tv_usec;
    //RMT_LOG(P4_LOG_LEVEL_TRACE, "next_deque %lu, now_us %lu\n", next_deque, now_us);
    if(next_deque > now_us) {
      usleep(next_deque - now_us);
    }
    next_deque += read_atomic_int(&USEC_INTERVAL);
#endif
    egress_pkt_t *e_pkt = (egress_pkt_t *) cb_read(cb_in);
    if (e_pkt == NULL) continue;
    buffered_pkt_t *b_pkt = &e_pkt->pkt;
    RMT_LOG(P4_LOG_LEVEL_TRACE, "egress_pipeline: packet dequeued\n");

    phv_clean(pipeline->phv);

    pipeline->phv->packet_id = b_pkt->pkt_id;

    parser_parse_pkt(pipeline->phv,
		     b_pkt->pkt_data, b_pkt->pkt_len,
		     pipeline->parse_state_start);
    parser_parse_metadata(pipeline->phv,
			  e_pkt->metadata, e_pkt->metadata_recirc);
    assert(!fields_get_clone_spec(pipeline->phv));

//::  if enable_intrinsic:
   /* Set dequeue metadata */
    fields_set_deq_qdepth(pipeline->phv, cb_count(cb_in));
    uint64_t enq_timestamp = fields_get_enq_timestamp(pipeline->phv);
    fields_set_deq_timedelta(pipeline->phv, get_timestamp()-enq_timestamp);
//::  #endif

    fields_set_instance_type(pipeline->phv, e_pkt->pkt.instance_type);

    free(e_pkt->metadata);
    free(e_pkt->metadata_recirc);
    if(pipeline->table_entry_fn)  /* empty egress pipeline ? */
      pipeline->table_entry_fn(pipeline->phv);

    uint8_t *pkt_data;
    int pkt_len;

    /* EGRESS MIRRORING */
    if(fields_get_clone_spec(pipeline->phv)) {
      RMT_LOG(P4_LOG_LEVEL_VERBOSE, "Egress mirroring\n");
      pipeline->deparse_fn(pipeline->phv, &pkt_data, &pkt_len);
      egress_cloning(pipeline, pkt_data, pkt_len, e_pkt->pkt.pkt_id);
      fields_set_clone_spec(pipeline->phv, 0);
    }

    update_checksums(pipeline->phv);
    pipeline->deparse_fn(pipeline->phv, &pkt_data, &pkt_len);

#ifdef SWITCH_CPU_DEBUG
    /* dump the head of the original packet before its buffer is released */
    RMT_LOG(P4_LOG_LEVEL_TRACE, "Ming Packet Data: %d\n", b_pkt->pkt_len);
    for (i = 0; i < 21 && i < b_pkt->pkt_len; i++)
      RMT_LOG(P4_LOG_LEVEL_TRACE, "%x ", b_pkt->pkt_data[i]);
    RMT_LOG(P4_LOG_LEVEL_TRACE, "\n");
#endif

    free(b_pkt->pkt_data);

//:: if "egress_drop_ctl" in extra_metadata_name_map:
      // program uses the separate egress_drop_ctl register
      // a non-zero value means drop
    if(pipeline->phv->deparser_drop_signal ||
       fields_get_egress_drop_ctl(pipeline->phv)) {
//:: else:
    if(pipeline->phv->deparser_drop_signal){
//:: #endif
      RMT_LOG(P4_LOG_LEVEL_VERBOSE, "dropping packet at egress\n");
      free(e_pkt);
      continue;
    }

    int egress = fields_get_egress_port(pipeline->phv);

    if(pipeline->phv->truncated_length && (pipeline->phv->truncated_length < pkt_len))
      pkt_len = pipeline->phv->truncated_length;
   

    pkt_manager_transmit(egress, pkt_data, pkt_len, b_pkt->pkt_id);
    free(e_pkt);

  }

  return NULL;
}

/* name has to be ingress or egress */
pipeline_t *pipeline_create(int id) {
  pipeline_t *pipeline = malloc(sizeof(pipeline_t));
  pipeline->name = "egress";
#ifdef RATE_LIMITING
  pipeline->cb_in = cb_init(read_atomic_int(&EGRESS_CB_SIZE), CB_WRITE_DROP, CB_READ_RETURN);
#else
  pipeline->cb_in = cb_init(read_atomic_int(&EGRESS_CB_SIZE), CB_WRITE_BLOCK, CB_READ_BLOCK);
#endif
  pipeline->parse_state_start = parse_state_start;
//:: if egress_entry_table is not None:
  pipeline->table_entry_fn = tables_apply_${egress_entry_table};
//:: else:
  pipeline->table_entry_fn = NULL;
//:: #endif
  pipeline->deparse_fn = deparser_produce_pkt;
  pipeline->phv = phv_init(NB_THREADS_PER_PIPELINE + id, RMT_PIPELINE_EGRESS);

  /* packet processing loop */
  pthread_create(&pipeline->processing_thread, NULL,
		 processing_loop_egress, (void *) pipeline);

  return pipeline;
}
Example no. 6
0
int
ping6_send(struct ping6 *ping)
{
   PACKET   pkt;
   struct icmp6req *pinghdr;
   char     addrbuf[40];   /* for printf()ing */
   int      plen = sizeof(struct icmp6req) + ping->length;
   int      err = 0;
   int      sendflags;
   int      bytesleft;
   int      offset;
   /* try for a pkt chain */
   LOCK_NET_RESOURCE(FREEQ_RESID);
   PK_ALLOC(pkt, plen + MaxLnh + sizeof(struct ipv6));
   UNLOCK_NET_RESOURCE(FREEQ_RESID); 

   if (pkt == NULL)
   {
      ping->count = 0;     /* mark session for deletion */
      return (ENP_NOBUFFER);
   }
   pkt->flags = 0;

   /* prepare for cb_read */
   pkt->nb_prot = pkt->nb_buff + MaxLnh + sizeof(struct ipv6);
 
   /* got chain? */
   if (pkt->pk_next == NULL)
      pkt->nb_plen = plen;       /* no */
   else
      pkt->nb_plen = pkt->nb_blen - MaxLnh - sizeof(struct ipv6);   /* yes */   

   /* Advance to point where we write the data */
   offset = sizeof(struct icmp6req);
   pkt->nb_tlen = offset;
   bytesleft = ping->length;
 
   while (bytesleft > PINGSTRSIZE)
   {
      err = cb_read(pkt, offset, (uint8_t *)pingdata6, PINGSTRSIZE);
      if (err < 0)
         break;
      offset += err;
      bytesleft -= err;
   }
   if (bytesleft && (err >= 0))
      err = cb_read(pkt, offset, (uint8_t *)pingdata6, bytesleft);
   /* read in data - user or standard? */
      
   /* got err? */
   if (err <= 0)
   {
      LOCK_NET_RESOURCE(FREEQ_RESID);  
      PK_FREE(pkt);
      UNLOCK_NET_RESOURCE(FREEQ_RESID);    
      ping->count = 0;     /* mark session for deletion */
      return (ENP_NOBUFFER);
   }    

#ifdef IP6_ROUTING
   /* Put scopeID in pkt */
   pkt->soxopts = npalloc(sizeof(struct ip_socopts));
   if (pkt->soxopts == NULL)
   {
      LOCK_NET_RESOURCE(FREEQ_RESID);
      pk_free(pkt);
      UNLOCK_NET_RESOURCE(FREEQ_RESID);      
      ping->count = 0;     /* mark session for deletion */
      return (ENP_NOBUFFER);
   }
   pkt->soxopts->ip_scopeid = ping->scopeID;
#endif

   pinghdr = (struct icmp6req *)pkt->nb_prot;
   pinghdr->code = 0;
   pinghdr->type = ICMP6_ECHOREQ;
   pinghdr->id = ping->sess_id;
   pinghdr->sequence = (htons((unshort)ping->sent));
   ping->sent++;
   pkt->net = ping->net;

   pkt->type = htons(0x86dd);

   /* multicast ping? */
   if (ping->fhost.addr[0] == 0xFF)
   {
      pkt->flags |= PKF_MCAST;  /* send mac multicast */
      sendflags = 0; /* no routing */
   }
   else
   {
      pkt->flags &= ~PKF_MCAST;  /* send mac unicast */
      /* see if we can skip the routing step */
      if(pkt->net && (!IP6EQ(&ping->nexthop, &ip6unspecified)))
      {
        pkt->nexthop = &ping->nexthop;   /* set next hop */
        sendflags = IP6F_NOROUTE;
      }
      else
        sendflags = IP6F_ALL;
   }
   /* is the scope global? */
   if ((ping->fhost.addr[0] == 0xFF) && ((ping->fhost.addr[1] & 0xF) == 0xE))
   {
      /* yup - it's a global ping */
      pkt->flags |= PKF_IPV6_GBL_PING;
   }

   /* loopback? */
   if (IN6_IS_ADDR_LOOPBACK((struct in6_addr *)&(ping->fhost.addr)))
      IP6CPY((struct in6_addr *)&(ping->lhost.addr), 
             (struct in6_addr *)&(ping->fhost.addr));   /* both are loopback */

   /* put prot at IPv6 hdr */
   pkt->nb_prot -= sizeof(struct ipv6);
   pkt->nb_plen += sizeof(struct ipv6);
   pkt->nb_tlen += sizeof(struct ipv6);
  
   /* set time for next ping */
   ping->nextping = TIME_ADD(CTICKS, ping->ping6_interval); 
 
   err = icmp6_send(pkt, &ping->lhost, &ping->fhost, sendflags);
   pkt->net->icmp6_ifmib.OutEchos++;
   
   if (err < 0)
   {
      /* Don't record gio error, since we're going to kill this
       * ping session anyway.
       */
      gio_printf(ping->gio, "error %d sending ping; sess %d, seq:%d\n",
                 err, ntohs(ping->sess_id), ping->sent);
      ping->count = 0;     /* mark session for deletion by timer */
   }
   else if ((err == 1) || (err == 0))
   {
      err = gio_printf(ping->gio, "Sent ping; sess: %d, Seq: %d to %s\n",
                       ntohs(ping->sess_id), ping->sent,
                       print_ip6(&ping->fhost, addrbuf));
   }
   return (0);
}