/* Carve this cpu's private mheap out of its reserved VM region and
   install it as the current allocation heap.  On cpu 0, additionally
   set up the shared (thread-safe) global heap and the per-cpu main
   vector.  Must run on the cpu it allocates for (asserted).  */
static uword
allocate_per_cpu_mheap (uword cpu)
{
  clib_smp_main_t *m = &clib_smp_main;
  void *heap;
  uword vm_bytes, stack_bytes, flags;

  ASSERT (os_get_cpu_number () == cpu);

  vm_bytes = (uword) 1 << m->log2_n_per_cpu_vm_bytes;
  stack_bytes = (uword) 1 << m->log2_n_per_cpu_stack_bytes;
  flags = MHEAP_FLAG_SMALL_OBJECT_CACHE;

  /* The heap occupies the per-cpu VM region up to where the stack begins. */
  heap = mheap_alloc_with_flags (clib_smp_vm_base_for_cpu (m, cpu),
				 vm_bytes - stack_bytes, flags);
  clib_mem_set_heap (heap);

  if (cpu == 0)
    {
      /* With a heap in place, cpu 0 can size the per-cpu main vector... */
      vec_resize (m->per_cpu_mains, m->n_cpus);

      /* ...and create the global heap shared by all cpus; it lives in the
	 VM slot just past the last per-cpu region and must be thread safe. */
      m->global_heap =
	mheap_alloc_with_flags (clib_smp_vm_base_for_cpu (m, cpu + m->n_cpus),
				vm_bytes, flags | MHEAP_FLAG_THREAD_SAFE);
    }

  /* NOTE(review): non-zero cpus store into per_cpu_mains, so cpu 0 must
     have run first — presumably guaranteed by the startup sequence.  */
  m->per_cpu_mains[cpu].heap = heap;

  return 0;
}
/* Reserves given number of error codes for given node.
   Registers (or re-registers) n_errors error strings for node_index:
   frees any prior reservation, allocates a contiguous run of slots in
   em->error_strings_heap, and sizes the counter and elog-event-type
   vectors to cover the new high-water mark.  The error_strings array is
   retained by reference (not copied), so it must outlive the node.
   Must be called from the main thread (cpu 0, asserted).  */
void vlib_register_errors (vlib_main_t * vm, u32 node_index, u32 n_errors, char *error_strings[])
{
  vlib_error_main_t *em = &vm->error_main;
  vlib_node_t *n = vlib_get_node (vm, node_index);
  uword l;

  ASSERT (os_get_cpu_number () == 0);

  /* Free up any previous error strings. */
  if (n->n_errors > 0)
    heap_dealloc (em->error_strings_heap, n->error_heap_handle);

  n->n_errors = n_errors;
  n->error_strings = error_strings;

  if (n_errors == 0)
    return;

  /* Reserve n_errors contiguous slots; the handle lets us free them on
     a later re-registration.  */
  n->error_heap_index =
    heap_alloc (em->error_strings_heap, n_errors, n->error_heap_handle);

  /* l is the heap vector's length AFTER the allocation, i.e. the
     current high-water mark for error indices.  */
  l = vec_len (em->error_strings_heap);

  /* Copy the caller's string pointers (not the string bytes) into the
     reserved slots.  */
  clib_memcpy (vec_elt_at_index (em->error_strings_heap, n->error_heap_index),
	       error_strings, n_errors * sizeof (error_strings[0]));

  /* Allocate a counter/elog type for each error. */
  vec_validate (em->counters, l - 1);
  vec_validate (vm->error_elog_event_types, l - 1);

  /* Zero counters for re-registrations of errors: restore the
     last-cleared snapshot when one covers this index range, otherwise
     start the counters at zero.  */
  if (n->error_heap_index + n_errors <= vec_len (em->counters_last_clear))
    clib_memcpy (em->counters + n->error_heap_index,
		 em->counters_last_clear + n->error_heap_index,
		 n_errors * sizeof (em->counters[0]));
  else
    memset (em->counters + n->error_heap_index, 0,
	    n_errors * sizeof (em->counters[0]));

  {
    elog_event_type_t t;
    uword i;

    memset (&t, 0, sizeof (t));
    /* One elog event type per error, formatted as "<node> <string>: %d".
       The format string is heap-allocated and intentionally leaked for
       the lifetime of the registration.  */
    for (i = 0; i < n_errors; i++)
      {
	t.format = (char *) format (0, "%v %s: %%d",
				    n->name, error_strings[i]);
	vm->error_elog_event_types[n->error_heap_index + i] = t;
      }
  }
}
/* Append one entry to the circular journal.  Lock free: each caller
   atomically claims a distinct slot by bumping the shared tail, then
   fills it in.  No-op while journaling is disabled.  */
void
cj_log (u32 type, void *data0, void *data1)
{
  cj_main_t *cjm = &cj_main;
  cj_record_t *r;
  u64 tail_after;

  if (!cjm->enable)
    return;

  /* Claim the next slot.  num_records is assumed to be a power of two
     so the mask below wraps the index around the ring — TODO confirm
     against the journal's initialization code.  */
  tail_after = __sync_add_and_fetch (&cjm->tail, 1);
  r = (cj_record_t *) &cjm->records[tail_after & (cjm->num_records - 1)];

  /* Fill in the claimed record. */
  r->time = vlib_time_now (cjm->vlib_main);
  r->cpu = os_get_cpu_number ();
  r->type = type;
  r->data[0] = (u64) data0;
  r->data[1] = (u64) data1;
}