Example #2
void *_sos_mem_alloc(uint32_t size_type)
{
	sos_mem_block_Cfg_t *dst;
	struct list_head *l;
	uint32_t index;
	sos_mem_slice_head_t *slice;
	
	dst = &sos_mem_pool->sos_mem_block_region.sos_mem_block_cfg[size_type];

	index = cvmx_atomic_fetch_and_add32_nosync(&dst->global_index, 1);
	index = index & (SOS_MEM_CHAIN_INTERNAL_NUM - 1);

	cvmx_spinlock_lock(&dst->msc[index].chain_lock);
	if(list_empty(&dst->msc[index].head))
	{
		cvmx_spinlock_unlock(&dst->msc[index].chain_lock);
		return NULL;
	}
	
	l = dst->msc[index].head.next;
	list_del(l);
	dst->msc[index].freenum--;
	cvmx_spinlock_unlock(&dst->msc[index].chain_lock);
	
	slice = container_of(l, sos_mem_slice_head_t, list);
	if(slice->ref != 0)
	{
		printf("slice ref alloc error %d, %p\n", slice->ref, slice);
		return NULL;
	}
	slice->ref = 1;
	return (void *)((uint8_t *)slice + SOS_MEM_SLICE_HEAD_SIZE);
}
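The index selection above is lock-free: an atomic fetch-and-add spreads callers across the sub-chains, and the mask only works because SOS_MEM_CHAIN_INTERNAL_NUM is a power of two. A minimal standalone sketch of the same pattern, using C11 atomics in place of the Octeon-specific cvmx_atomic_fetch_and_add32_nosync (all names here are illustrative):

#include <stdatomic.h>
#include <stdint.h>

#define CHAIN_NUM 8u   /* must be a power of two for the mask to equal modulo */

static _Atomic uint32_t global_index;

static uint32_t next_chain(void)
{
    /* fetch-and-add hands each caller a distinct ticket, spreading
     * contention across CHAIN_NUM independent chain locks */
    uint32_t index = atomic_fetch_add(&global_index, 1);
    return index & (CHAIN_NUM - 1);   /* equivalent to index % CHAIN_NUM */
}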
Example #3
void sos_mem_block_replenish(sos_mem_pool_region_t *psosmp, uint32_t size_id, void *start, uint32_t size, int init_num)
{
	int i,j;
	int numperchain = init_num/SOS_MEM_CHAIN_INTERNAL_NUM;

	sos_mem_slice_head_t *head;
	void *begin;
	sos_mem_block_Chain *bc;

	for(i = 0; i < SOS_MEM_CHAIN_INTERNAL_NUM; i++)
	{
		bc = &psosmp->sos_mem_block_region.sos_mem_block_cfg[size_id].msc[i];
		cvmx_spinlock_lock(&bc->chain_lock);

		bc->totalnum += numperchain;
		bc->freenum += numperchain;

		for(j = 0; j < numperchain; j++)
		{
			begin = (void *)((uint8_t *)start + (i * numperchain + j) * size);
			head = (sos_mem_slice_head_t *)begin;
			head->headmagic = SOS_MEM_HEAD_MAGIC;
			head->subchain_id = i;
			head->size_type = size_id;
			*(uint32_t *)((uint8_t *)head + SOS_MEM_SLICE_HEAD_SIZE + sos_mem_size[size_id].size) = SOS_MEM_TAIL_MAGIC;
			list_add(&head->list, &bc->head);
		}
		
		cvmx_spinlock_unlock(&bc->chain_lock);
	}
}
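Each chain i owns numperchain consecutive slices, so slice j of chain i lives at start + (i*numperchain + j)*size. A small helper makes the addressing explicit (illustrative only, not part of the original code):

#include <stdint.h>

/* Illustrative addressing helper: slice j of chain i, where each chain
 * owns 'numperchain' consecutive slices of 'size' bytes each. */
static inline uint8_t *slice_at(uint8_t *start, uint32_t i, uint32_t j,
                                uint32_t numperchain, uint32_t size)
{
    return start + (uint64_t)(i * numperchain + j) * size;
}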
Example #4
int uart_printf( int uart_index, const char *format, ... )
{
	char buffer[ 1024 ];
	va_list args;

	va_start( args, format );
	int result = vsnprintf( buffer, sizeof( buffer ), format, args );
	va_end( args );

	int i = result;
	char *ptr = buffer;

	cvmx_spinlock_lock( &uart_printf_lock );
	while ( i > 0 )
	{
		if ( *ptr == '\n' )
		{
			uart_write_byte( uart_index, '\r' );
		}
		uart_write_byte( uart_index, *ptr );
		ptr++;
		i--;
	}
	cvmx_spinlock_unlock( &uart_printf_lock );

	return result;
}
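A hedged call-site sketch: UART index 0 and the message are illustrative, and uart_printf itself inserts the '\r' before each '\n':

/* Prototype as defined above. */
int uart_printf(int uart_index, const char *format, ...);

void print_boot_banner(int core)
{
    /* a plain '\n' is enough: uart_printf emits "\r\n" on the wire */
    uart_printf(0, "core %d: boot complete\n", core);
}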
Example #5
int sos_mem_replenish(int size_type)
{
	int initnum;
	int slicerawsize;
	int slicewholesize;
	uint32_t requestsize;
	int block_used;
	void *start;

	initnum = sos_mem_size[size_type].init_num;
	slicerawsize = sos_mem_size[size_type].size;

	slicewholesize = slicerawsize + SOS_MEM_SLICE_HEAD_SIZE + SOS_MEM_TAIL_MAGIC_SIZE;
	requestsize = slicewholesize * initnum;

	cvmx_spinlock_lock(&sos_mem_pool->region_lock);

	if( SOS_MEM_BLOCK_MAX == sos_mem_pool->block_num )
	{
		cvmx_spinlock_unlock(&sos_mem_pool->region_lock);
		return SEC_NO;
	}

	if( requestsize > sos_mem_pool->current_size )
	{
		cvmx_spinlock_unlock(&sos_mem_pool->region_lock);
		return SEC_NO;
	}

	start = sos_mem_pool->current_start;

	block_used = sos_mem_pool->block_num;
	sos_mem_pool->sos_mem_block[block_used].size_type = size_type;
	sos_mem_pool->sos_mem_block[block_used].start = start;
	sos_mem_pool->sos_mem_block[block_used].len = requestsize;
	sos_mem_pool->block_num = block_used + 1;

	sos_mem_block_replenish(sos_mem_pool, size_type, start, slicewholesize, initnum);

	sos_mem_pool->current_start = (void *)((uint8_t *)start + requestsize);
	sos_mem_pool->current_size = sos_mem_pool->current_size - requestsize;
	
	cvmx_spinlock_unlock(&sos_mem_pool->region_lock);

	return SEC_OK;
}
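A hedged usage sketch tying the two halves together: carve a new block for one size class, then take a slice from it. SOS_SIZE_SMALL is a hypothetical size_type constant; SEC_OK is the success code used above:

/* SOS_SIZE_SMALL is hypothetical; any valid index into sos_mem_size[] works. */
if (sos_mem_replenish(SOS_SIZE_SMALL) == SEC_OK) {
    void *buf = _sos_mem_alloc(SOS_SIZE_SMALL);
    if (buf != NULL) {
        /* ... use the slice ... */
        sos_mem_free(buf);
    }
}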
Example #6
/**
 * Activate the current application core for receiving hotplug shutdown requests.
 *
 * This routine makes sure that each core belonging to the application is enabled 
 * to receive the shutdown notification and also provides a barrier sync to make
 * sure that all cores are ready. 
 */
int cvmx_app_hotplug_activate(void)
{
    uint64_t cnt = 0;
    uint64_t cnt_interval = 10000000;

    while (!cvmx_app_hotplug_info_ptr) 
    {
        cnt++;
        if ((cnt % cnt_interval) == 0)
            printf("waiting for cnt=%lld\n", (unsigned long long)cnt);
    }

    if (cvmx_app_hotplug_info_ptr->hplugged_cores & (1ull << cvmx_get_core_num()))
    {
#ifdef DEBUG
        printf("core=%d : is being hotplugged \n", cvmx_get_core_num());
#endif
        cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
        sys_info_ptr->core_mask |= 1ull << cvmx_get_core_num();
    }
    else
    {
        __cvmx_app_hotplug_sync();
    }
    cvmx_spinlock_lock(&cvmx_app_hotplug_lock);
    if (!cvmx_app_hotplug_info_ptr)
    {
        cvmx_spinlock_unlock(&cvmx_app_hotplug_lock);
        printf("ERROR: This application is not registered for hotplug\n");
        return -1;
    }
    /* Enable the interrupt before we mark the core as activated */
    cvmx_interrupt_unmask_irq(CVMX_IRQ_MBOX0);
    cvmx_app_hotplug_info_ptr->hotplug_activated_coremask |= (1ull<<cvmx_get_core_num());

#ifdef DEBUG
    printf("cvmx_app_hotplug_activate(): coremask 0x%x valid %d sizeof %d\n", 
                 cvmx_app_hotplug_info_ptr->coremask, cvmx_app_hotplug_info_ptr->valid, 
                 sizeof(*cvmx_app_hotplug_info_ptr));
#endif

    cvmx_spinlock_unlock(&cvmx_app_hotplug_lock);

    return 0;
}
Example #7
void cvmx_sysinfo_add_self_to_core_mask(void)
{
    int core = cvmx_get_core_num();
    uint32_t core_mask = 1 << core;
    
    cvmx_spinlock_lock(&state.lock);
    state.sysinfo.core_mask = state.sysinfo.core_mask | core_mask;
    cvmx_spinlock_unlock(&state.lock);
}
Example #8
void cvmx_sysinfo_remove_self_from_core_mask(void)
{
    int core = cvmx_get_core_num();
    uint32_t core_mask = 1 << core;
    
    cvmx_spinlock_lock(&state.lock);
    state.sysinfo.core_mask = state.sysinfo.core_mask & ~core_mask;
    cvmx_spinlock_unlock(&state.lock);
}
Example #9
void cvmx_app_hotplug_remove_self_from_core_mask(void)
{
    int core = cvmx_get_core_num();
    uint64_t core_mask = 1ull << core;

    cvmx_spinlock_lock(&cvmx_app_hotplug_lock);
    cvmx_app_hotplug_info_ptr->coremask = cvmx_app_hotplug_info_ptr->coremask & ~core_mask ;
    cvmx_app_hotplug_info_ptr->hotplug_activated_coremask =
        cvmx_app_hotplug_info_ptr->hotplug_activated_coremask & ~core_mask ;
    cvmx_spinlock_unlock(&cvmx_app_hotplug_lock);
}
void cvmx_zone_free(cvmx_zone_t zone, void *ptr)
{
	
    assert(zone != NULL);
    assert(zone->baseptr != NULL);
    assert((unsigned long)ptr - (unsigned long)zone->baseptr < zone->num_elem * zone->elem_size);

    cvmx_spinlock_lock(&zone->lock);
    *(void **)ptr = zone->freelist;
    zone->freelist = ptr;
    cvmx_spinlock_unlock(&zone->lock);
}
/**
 * Simulator magic is not supported in user mode under Linux.
 * This version of simprintf simply calls the underlying C
 * library printf for output. It also makes sure that two
 * calls to simprintf provide atomic output.
 *
 * @param fmt    Format string in the same format as printf.
 */
void simprintf(const char *fmt, ...)
{
    CVMX_SHARED static cvmx_spinlock_t simprintf_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
    va_list ap;

    cvmx_spinlock_lock(&simprintf_lock);
    printf("SIMPRINTF(%d): ", (int)cvmx_get_core_num());
    va_start(ap, fmt);
    vprintf(fmt, ap);
    va_end(ap);
    cvmx_spinlock_unlock(&simprintf_lock);
}
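The same serialization pattern in portable C, with a pthread mutex standing in for the CVMX spinlock (a sketch for comparison, not part of the SDK):

#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

/* Two threads calling this concurrently never interleave their output,
 * for the same reason simprintf's spinlock keeps per-core output atomic. */
void locked_printf(const char *fmt, ...)
{
    va_list ap;

    pthread_mutex_lock(&log_lock);
    va_start(ap, fmt);
    vprintf(fmt, ap);
    va_end(ap);
    pthread_mutex_unlock(&log_lock);
}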
/**
 * Wait (stall) until all cores in the given coremask has reached this point
 * in the program execution before proceeding.
 *
 * @param  coremask  the group of cores performing the barrier sync
 *
 */
void cvmx_coremask_barrier_sync(const cvmx_coremask_t *pcm)
{
	int i;
	unsigned int target;
#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
	assert(pcm != NULL && !((long)pcm & 3));
#endif
	cvmx_spinlock_lock(&state.lock);

	for (i = 0; i < CVMX_COREMASK_MAX_SYNCS; i++) {

		if (cvmx_coremask_is_empty(&state.s[i].coremask)) {
			/* end of existing coremask list, create new entry, fall-thru */
			cvmx_coremask_copy(&state.s[i].coremask, pcm);
		}

		if (cvmx_coremask_cmp(&state.s[i].coremask, pcm) == 0) {

			target = state.s[i].exit + 1;	/* wrap-around at 32b */

			cvmx_coremask_set_self(&state.s[i].checkin);

			if (cvmx_coremask_cmp(&state.s[i].checkin, pcm) == 0) {
				cvmx_coremask_clear_all(&state.s[i].checkin);
				state.s[i].exit = target;	/* signal exit condition */
			}
			cvmx_spinlock_unlock(&state.lock);

			while (state.s[i].exit != target) ;

			return;
		}
	}

	/* error condition - coremask array overflowed */
	cvmx_spinlock_unlock(&state.lock);
#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
	assert(0);
#endif
}
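The core of this barrier is a check-in bitmap plus a wrapping exit counter: the last core to check in bumps exit, and every core spins until exit moves past the value it captured on entry. A lock-free toy version of the same idea with C11 atomics (NCORES and all names here are illustrative, and the participant set is assumed fixed):

#include <stdatomic.h>

#define NCORES 4u   /* illustrative participant count */

static _Atomic unsigned barrier_checkin;  /* arrivals this round */
static _Atomic unsigned barrier_exit;     /* completed-round counter */

void toy_barrier(void)
{
    unsigned target = atomic_load(&barrier_exit) + 1;  /* wraps, like the 32b wrap above */

    if (atomic_fetch_add(&barrier_checkin, 1) + 1 == NCORES) {
        /* last arrival: reset check-in, then release everyone */
        atomic_store(&barrier_checkin, 0);
        atomic_store(&barrier_exit, target);
    }
    while (atomic_load(&barrier_exit) != target)
        ;  /* spin until the round completes */
}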
Example #13
/**
 * Wait (stall) until all cores in the given coremask has reached this point
 * in the program execution before proceeding.
 *
 * @param  coremask  the group of cores performing the barrier sync
 *
 */
void cvmx_coremask_barrier_sync(unsigned int coremask)
{
    int i;
    unsigned int target;

    assert(coremask != 0);

    cvmx_spinlock_lock(&state.lock);

    for (i = 0; i < CVMX_COREMASK_MAX_SYNCS; i++) {

        if (state.s[i].coremask == 0) {
            /* end of existing coremask list, create new entry, fall-thru */
            state.s[i].coremask = coremask;
        }

        if (state.s[i].coremask == coremask) {

            target = state.s[i].exit + 1;  /* wrap-around at 32b */

            state.s[i].checkin |= cvmx_coremask_core(cvmx_get_core_num());
            if (state.s[i].checkin == coremask) {
                state.s[i].checkin = 0;
                state.s[i].exit = target;  /* signal exit condition */
            }
            cvmx_spinlock_unlock(&state.lock);

            while (state.s[i].exit != target)
                ;

            return;
        }
    }

    /* error condition - coremask array overflowed */
    cvmx_spinlock_unlock(&state.lock);
    assert(0);
}
Example #14
int uart_prints( int uart_index, char *buffer, int len)
{
	int written = len;	/* 'len' is consumed by the loop below */

	cvmx_spinlock_lock( &uart_printf_lock );
	while ( len > 0 ) {
		if ( *buffer == '\n' ) {
			uart_write_byte( uart_index, '\r' );
		}
		uart_write_byte( uart_index, *buffer );
		buffer++;
		len--;
	}
	cvmx_spinlock_unlock( &uart_printf_lock );
	return written;
}
Example #15
void sos_mem_free(void *p)
{
	sos_mem_slice_head_t *slice;
	uint32_t subchain_id;
	uint32_t size_type;
	sos_mem_block_Cfg_t *dst;
	slice = (sos_mem_slice_head_t *)((uint8_t *)p - SOS_MEM_SLICE_HEAD_SIZE);

	if(slice->headmagic != SOS_MEM_HEAD_MAGIC)
	{
		printf("buf %p has been destroyed!\n", slice);
		return;
	}

	subchain_id = slice->subchain_id;
	if(subchain_id >= SOS_MEM_CHAIN_INTERNAL_NUM)
	{
		printf("buf %p has been destroyed!\n", slice);
		return;
	}

	size_type = slice->size_type;
	if( size_type >= SOS_SIZE_NUM )
	{
		printf("buf %p has been destroyed!\n", slice);
		return;
	}

	if(slice->ref != 1)
	{
		printf("slice ref free error %d, %p\n", slice->ref, slice);
		return;
	}
	slice->ref = 0;


	dst = &sos_mem_pool->sos_mem_block_region.sos_mem_block_cfg[size_type];

	cvmx_spinlock_lock(&dst->msc[subchain_id].chain_lock);
	list_add(&slice->list, &dst->msc[subchain_id].head);
	dst->msc[subchain_id].freenum++;
	cvmx_spinlock_unlock(&dst->msc[subchain_id].chain_lock);

}
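The free path validates the head magic, chain id, size type and reference count. A matching tail check, using the layout written by sos_mem_block_replenish (the tail magic sits right after the payload), might look like this illustrative helper:

#include <stdint.h>

/* Illustrative tail check matching the layout set up in
 * sos_mem_block_replenish. */
static int slice_tail_ok(sos_mem_slice_head_t *slice)
{
    uint8_t *tail = (uint8_t *)slice + SOS_MEM_SLICE_HEAD_SIZE
                    + sos_mem_size[slice->size_type].size;
    return *(uint32_t *)tail == SOS_MEM_TAIL_MAGIC;
}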
Example #16
void *cvmx_zone_alloc(cvmx_zone_t zone, uint32_t flags)
{
	cvmx_zone_t item;

	assert(zone != NULL);
	assert(zone->baseptr != NULL);
	cvmx_spinlock_lock(&zone->lock);

	item = (cvmx_zone_t) zone->freelist;
	if (item != NULL)
		zone->freelist = *(void **)item;
	/* else: zone exhausted, the caller gets NULL */

	cvmx_spinlock_unlock(&zone->lock);
	return (item);
}
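A hedged round-trip sketch for the zone allocator pair above; the zone is assumed to have been created elsewhere, and the flags argument is unused by this alloc path:

/* Round trip: pop an element off the freelist, then push it back. */
void zone_roundtrip(cvmx_zone_t zone)
{
    void *buf = cvmx_zone_alloc(zone, 0);

    if (buf != NULL)
        cvmx_zone_free(zone, buf);   /* re-links buf at the freelist head */
}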
Example #17
int
main (void)
{
  CVMX_SHARED static cvmx_spinlock_t core_lock = 
			CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
  int j;
  cvmx_sysinfo_t *sysinfo;

  sysinfo = cvmx_sysinfo_get();

  for (j = 0; j < 4; j++)
    {
      /* Used to sync up the cores, otherwise the same core hits the hardware
	 watchpoint again and again on continue commands.  */
      cvmx_coremask_barrier_sync (&sysinfo->core_mask);
      /* Used to control the sequence of the program. There are chances of 
         hitting the hardware breakpoint by both the cores at the same time. */
      cvmx_spinlock_lock (&core_lock);
      foo ();
      cvmx_spinlock_unlock (&core_lock);
    }

  while (1); /* set common breakpoint here */
}
/**
 * Main entrypoint of the application. Here we setup shared
 * memory and fork processes for each cpu. This simulates the
 * normal simple executive environment of one process per
 * cpu core.
 *
 * @param argc   Number of command line arguments
 * @param argv   The command line arguments
 * @return Return value for the process
 */
int main(int argc, const char *argv[])
{
    CVMX_SHARED static cvmx_spinlock_t mask_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
    CVMX_SHARED static int32_t pending_fork;
    unsigned long cpumask;
    unsigned long cpu;

    setup_system_info();

    if (sizeof(void*) == 4)
    {
        if (linux_mem32_min)
            setup_reserve32();
        else
        {
            printf("\nFailed to access 32bit shared memory region. Most likely the Kernel\n"
                   "has not been configured for 32bit shared memory access. Check the\n"
                   "kernel configuration.\n"
                   "Aborting...\n\n");
            exit(-1);
        }
    }

    setup_cvmx_shared();

    /* Check to make sure the Chip version matches the configured version */
    octeon_model_version_check(cvmx_app_init_processor_id);

    /* Get the list of logical cpus we should run on */
    if (sched_getaffinity(0, sizeof(cpumask), (cpu_set_t*)&cpumask))
    {
        perror("sched_getaffinity failed");
        exit(errno);
    }

    cvmx_sysinfo_t *system_info = cvmx_sysinfo_get();

    cvmx_atomic_set32(&pending_fork, 1);
    for (cpu=0; cpu<16; cpu++)
    {
        if (cpumask & (1<<cpu))
        {
            /* Turn off the bit for this CPU number; it has been counted */
            cpumask ^= (1<<cpu);
            /* If this is the last CPU to run on, use this process instead of forking another one */
            if (cpumask == 0)
                break;
            /* Increment the number of CPUs running this app */
            cvmx_atomic_add32(&pending_fork, 1);
            /* Fork a process for the new CPU */
            int pid = fork();
            if (pid == 0)
            {
                break;
            }
            else if (pid == -1)
            {
                perror("Fork failed");
                exit(errno);
            }
        }
    }

    /* Set affinity to lock me to the correct CPU */
    cpumask = (1<<cpu);
    if (sched_setaffinity(0, sizeof(cpumask), (cpu_set_t*)&cpumask))
    {
        perror("sched_setaffinity failed");
        exit(errno);
    }

    cvmx_spinlock_lock(&mask_lock);
    system_info->core_mask |= 1<<cvmx_get_core_num();
    cvmx_atomic_add32(&pending_fork, -1);
    if (cvmx_atomic_get32(&pending_fork) == 0)
        cvmx_dprintf("Active coremask = 0x%x\n", system_info->core_mask);
    cvmx_spinlock_unlock(&mask_lock);

    /* Spinning waiting for forks to complete */
    while (cvmx_atomic_get32(&pending_fork)) {}

    cvmx_coremask_barrier_sync(system_info->core_mask);

    int result = appmain(argc, argv);

    shutdown_cvmx_shared();

    return result;
}
void cvmx_bootmem_unlock(void)
{
    cvmx_spinlock_unlock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
}
Example #20
int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
					   uint64_t max_addr,
					   uint64_t alignment,
					   char *name,
					   uint32_t flags)
{
	int64_t addr_allocated;
	struct cvmx_bootmem_named_block_desc *named_block_desc_ptr;

#ifdef DEBUG
	cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: "
		     "0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n",
		     (unsigned long long)size,
		     (unsigned long long)min_addr,
		     (unsigned long long)max_addr,
		     (unsigned long long)alignment,
		     name);
#endif
	if (cvmx_bootmem_desc->major_version != 3) {
		cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
			     "%d.%d at addr: %p\n",
			     (int)cvmx_bootmem_desc->major_version,
			     (int)cvmx_bootmem_desc->minor_version,
			     cvmx_bootmem_desc);
		return -1;
	}

	/*
	 * Take lock here, as name lookup/block alloc/name add need to
	 * be atomic.
	 */
	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));

	/* Get pointer to first available named block descriptor */
	named_block_desc_ptr =
		cvmx_bootmem_phy_named_block_find(NULL,
						  flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);

	/*
	 * Check to see if name already in use, return error if name
	 * not available or no more room for blocks.
	 */
	if (cvmx_bootmem_phy_named_block_find(name,
					      flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) || !named_block_desc_ptr) {
		if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
			cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
		return -1;
	}


	/*
	 * Round size up to a multiple of the minimum alignment bytes.
	 * We need the actual size allocated to allow for blocks to be
	 * coalesced when they are freed. The alloc routine does the
	 * same rounding up on all allocations.
	 */
	size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE);

	addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr,
						alignment,
						flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
	if (addr_allocated >= 0) {
		named_block_desc_ptr->base_addr = addr_allocated;
		named_block_desc_ptr->size = size;
		strncpy(named_block_desc_ptr->name, name,
			cvmx_bootmem_desc->named_block_name_len);
		named_block_desc_ptr->name[cvmx_bootmem_desc->named_block_name_len - 1] = 0;
	}

	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
	return addr_allocated;
}
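A hedged call-site sketch: the size, alignment and name are illustrative, and passing 0 for min_addr/max_addr is assumed here to mean no address-range restriction. A negative return covers all failure cases (name taken, descriptor table full, or allocation failure):

int64_t base = cvmx_bootmem_phy_named_block_alloc(
        1 << 20,      /* size: 1 MiB, rounded up internally */
        0, 0,         /* min_addr/max_addr: assumed "no restriction" */
        128,          /* alignment in bytes */
        "my_block",   /* lookup key for later named-block finds */
        0);           /* flags == 0: take the descriptor lock internally */

if (base < 0) {
    /* name already in use, no free descriptor, or out of memory */
}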
Example #21
/**
 * Default exception handler. Prints out the exception
 * cause decode and all relevant registers.
 *
 * @param registers Registers at time of the exception
 */
static void cvmx_interrupt_default_exception_handler(uint64_t registers[32])
{
    uint64_t trap_print_cause;

    ebt3000_str_write("Trap");
    cvmx_spinlock_lock(&cvmx_interrupt_default_lock);
    safe_printf("******************************************************************\n");
    safe_printf("Core %lu: Unhandled Exception. Cause register decodes to:\n", cvmx_get_core_num());
    READ_COP0(trap_print_cause, COP0_CAUSE);
    switch ((trap_print_cause >> 2) & 0x1f)
    {
        case 0x0:
            safe_printf("Interrupt\n");
            break;
        case 0x1:
            safe_printf("TLB Mod\n");
            break;
        case 0x2:
            safe_printf("tlb load/fetch\n");
            break;
        case 0x3:
            safe_printf("tlb store\n");
            break;
        case 0x4:
            safe_printf("address exc, load/fetch\n");
            break;
        case 0x5:
            safe_printf("address exc, store\n");
            break;
        case 0x6:
            safe_printf("bus error, inst. fetch\n");
            break;
        case 0x7:
            safe_printf("bus error, load/store\n");
            break;
        case 0x8:
            safe_printf("syscall\n");
            break;
        case 0x9:
            safe_printf("breakpoint \n");
            break;
        case 0xa:
            safe_printf("reserved instruction\n");
            break;
        case 0xb:
            safe_printf("cop unusable\n");
            break;
        case 0xc:
            safe_printf("arithmetic overflow\n");
            break;
        case 0xd:
            safe_printf("trap\n");
            break;
        case 0xf:
            safe_printf("floating point exc\n");
            break;
        case 0x12:
            safe_printf("cop2 exception\n");
            break;
        case 0x16:
            safe_printf("mdmx unusable\n");
            break;
        case 0x17:
            safe_printf("watch\n");
            break;
        case 0x18:
            safe_printf("machine check\n");
            break;
        case 0x1e:
            safe_printf("cache error\n");
            break;
        default:
            safe_printf("Reserved exception cause.\n");
            break;

    }

    safe_printf("******************************************************************\n");
    cvmx_interrupt_dump_registers(registers);
    safe_printf("******************************************************************\n");
    cvmx_spinlock_unlock(&cvmx_interrupt_default_lock);

    while (1)
    {
        if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
            asm volatile ("break 1");
        else
            asm volatile ("wait");
    }
}
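The switch above keys off the ExcCode field, bits 6:2 of the MIPS COP0 Cause register; pulled out as a standalone helper (illustrative):

#include <stdint.h>

/* ExcCode is bits 6:2 of the MIPS COP0 Cause register. */
static inline unsigned exc_code(uint64_t cause)
{
    return (unsigned)((cause >> 2) & 0x1f);
}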