// Profiling hook invoked on entry to every instrumented function.
// SMP-safe: serializes calls to __profile_mcount() with a spinlock,
// while letting a nested call on the SAME cpu (triggered from inside
// __profile_mcount() itself) fall straight through instead of
// deadlocking on the already-held lock.  mcount_cpu records which cpu,
// if any, currently owns mcount_lock.
void _mcount(void)
{
    int ints_enabled;
    HAL_SMP_CPU_TYPE this_cpu;

    HAL_DISABLE_INTERRUPTS(ints_enabled);

    // This cpu is now not going to run any other code. So, did it
    // already own the spinlock?
    this_cpu = HAL_SMP_CPU_THIS();

    if (mcount_cpu != this_cpu)
    {
        // Nope, so this cannot be a nested call to mcount()
        HAL_SPINLOCK_SPIN(mcount_lock);

        // And no other cpu is executing inside mcount() either
        mcount_cpu = this_cpu;

        // A possibly-recursive call is now safe.
        // Arc recorded as (caller's caller, caller).  NOTE(review):
        // __builtin_return_address with a non-zero level is not
        // guaranteed retrievable on all targets -- confirm for this port.
        __profile_mcount((CYG_ADDRWORD)__builtin_return_address(1),
                         (CYG_ADDRWORD)__builtin_return_address(0));

        // All done.
        // Clear the owner marker BEFORE releasing the lock so a nested
        // call can never observe the lock free but mcount_cpu stale.
        mcount_cpu = HAL_SMP_CPU_NONE;
        HAL_SPINLOCK_CLEAR(mcount_lock);
    }

    HAL_RESTORE_INTERRUPTS(ints_enabled);
}
// DSR for the inter-CPU message interrupt: act on the reschedule /
// timeslice requests that the ISR latched for this CPU.  The pending
// flags are snapshotted and cleared under the mailbox lock (with the
// vector masked so the ISR cannot run concurrently); the scheduler
// calls are then made outside the lock.  Always returns 0.
__externC CYG_WORD32
cyg_hal_cpu_message_dsr(CYG_WORD32 vector, CYG_ADDRWORD data)
{
    struct smp_msg_t *mbox = &smp_msg[HAL_SMP_CPU_THIS()];
    CYG_WORD32 do_resched;
#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE
    CYG_WORD32 do_timeslice;
#endif

    // Take a consistent snapshot of the pending-request flags and
    // reset them for the next round.
    cyg_drv_interrupt_mask(vector);
    HAL_SPINLOCK_SPIN(mbox->lock);

    do_resched = mbox->reschedule;
#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE
    do_timeslice = mbox->timeslice;
    mbox->reschedule = mbox->timeslice = false;
#else
    mbox->reschedule = false;
#endif

    HAL_SPINLOCK_CLEAR(mbox->lock);
    cyg_drv_interrupt_unmask(vector);

    // Perform the requested scheduler actions with the lock released.
    if (do_resched)
    {
        cyg_scheduler_set_need_reschedule();
    }

#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE
    if (do_timeslice)
    {
        cyg_scheduler_timeslice_cpu();
    }
#endif

    return 0;
}
// ISR for the inter-CPU message interrupt.  Bumps the diagnostic
// counters for latched reschedule/timeslice requests, drains the
// per-CPU message ring, and reports via bit 1 of the return value
// whether the DSR must run.  Bit 0 (handled) is always set.
__externC CYG_WORD32
cyg_hal_cpu_message_isr(CYG_WORD32 vector, CYG_ADDRWORD data)
{
    struct smp_msg_t *mbox = &smp_msg[HAL_SMP_CPU_THIS()];
    CYG_WORD32 result = 1;

    cyg_drv_interrupt_mask(vector);
    HAL_SPINLOCK_SPIN( mbox->lock );
    cyg_drv_interrupt_acknowledge(vector);

    if (mbox->reschedule)
        mbox->reschedule_count++;
    if (mbox->timeslice)
        mbox->timeslice_count++;
    if (mbox->reschedule || mbox->timeslice)
        result |= 2;                          /* Call DSR */

    // Drain every message currently queued in the ring buffer.
    while (mbox->head != mbox->tail)
    {
        CYG_WORD32 msg = mbox->msgs[mbox->head];

        switch (msg & HAL_SMP_MESSAGE_TYPE)
        {
        case HAL_SMP_MESSAGE_RESCHEDULE:
            result |= 2;                      /* Call DSR */
            break;
        case HAL_SMP_MESSAGE_MASK:
        case HAL_SMP_MESSAGE_UNMASK:
        case HAL_SMP_MESSAGE_REVECTOR:
            break;
        }

        /* Update the head pointer after handling the message, so that
         * the wait in cyg_hal_cpu_message() completes after the action
         * requested. */
        mbox->head = (mbox->head + 1) & (SMP_MSGBUF_SIZE-1);
    }

    HAL_SPINLOCK_CLEAR(mbox->lock);
    cyg_drv_interrupt_unmask(vector);

    return result;
}
/* Send message 'msg' (with 'arg' OR-ed into the same word) to 'cpu'.
 * RESCHEDULE and TIMESLICE requests are latched as boolean flags; every
 * other message type travels through the per-CPU ring buffer.  If
 * 'wait' is non-zero, spin until the target CPU has drained its ring.
 * The entire operation runs with local interrupts disabled. */
__externC void
cyg_hal_cpu_message(HAL_SMP_CPU_TYPE cpu, CYG_WORD32 msg,
                    CYG_WORD32 arg, CYG_WORD32 wait)
{
    struct smp_msg_t *m = &smp_msg[cpu];
    CYG_INTERRUPT_STATE old_ints;

    /* This only works because we are assigning the vector by CPU number */
    HAL_DISABLE_INTERRUPTS(old_ints);

    /* Get access to the message buffer for the selected CPU */
    HAL_SPINLOCK_SPIN(m->lock);

    if (msg == HAL_SMP_MESSAGE_RESCHEDULE)
    {
        m->reschedule = true;
    }
    else if (msg == HAL_SMP_MESSAGE_TIMESLICE)
    {
        m->timeslice = true;
    }
    else
    {
        // One slot is always left empty: next == head means "full".
        CYG_WORD32 next = (m->tail + 1) & (SMP_MSGBUF_SIZE-1);

        /* If the buffer is full, wait for space to appear in it.
         * This should only need to be done very rarely.
         */
        // Drop and immediately retake the lock each probe so the target
        // CPU's ISR can get in and advance m->head.  NOTE(review): this
        // spins with local interrupts disabled and relies on the TARGET
        // cpu emptying the ring -- confirm it cannot be the caller.
        while (next == m->head)
        {
            HAL_SPINLOCK_CLEAR(m->lock);
            HAL_SPINLOCK_SPIN(m->lock);
        }

        // Message type and argument are packed into a single word.
        m->msgs[m->tail] = msg | arg;
        m->tail = next;
    }

    /* Now send an interrupt to the CPU */
    // Only raise the SGI if the target CPU has actually been started.
    if (cyg_hal_smp_cpu_running[cpu])
    {
        cyg_uint32 sgir = 0x0;

        /* Set target list */
        // CPU target mask goes in bits [23:16] of the GIC ICDSGIR register.
        sgir |= ((0x1 << cpu) << 16);
        /* Set vector */
        // Software-generated interrupt ID occupies bits [3:0].
        sgir |= (CYGNUM_HAL_SMP_CPU_INTERRUPT_VECTOR(cpu) & 0xF);
        /* Send the interrupt */
        HAL_WRITE_UINT32(XC7Z_ICD_SGIR_BASEADDR, sgir);
    }

    HAL_SPINLOCK_CLEAR(m->lock);

    /* If we are expected to wait for the command to complete, then
     * spin here until it does. We actually wait for the destination
     * CPU to empty its input buffer. So we might wait for messages
     * from other CPUs as well. But this is benign.
     */
    // NOTE(review): RESCHEDULE/TIMESLICE are flag-based, not queued, so
    // for those this wait only observes head == tail -- i.e. it does not
    // confirm the flag itself was consumed.  Confirm intended semantics.
    while (wait)
    {
        HAL_SPINLOCK_SPIN( m->lock );

        if (m->head == m->tail)
            wait = false;

        HAL_SPINLOCK_CLEAR(m->lock);
    }

    HAL_RESTORE_INTERRUPTS( old_ints );
}