/*
 * SCC1 interrupt handler
 *
 * Dispatches receive/transmit events from the MC68360 SCC1 to the
 * corresponding daemon tasks.  An event is handled only when it is both
 * enabled in the mask register (sccm) and pending in the event register
 * (scce); the event is acknowledged and then masked so that the daemon
 * is woken exactly once per batch of work (the daemon presumably
 * re-enables the mask bit when done -- TODO confirm against the daemon
 * code).
 */
static rtems_isr m360Enet_interrupt_handler (rtems_vector_number v)
{
  /*
   * Frame received?
   */
  if ((m360.scc1.sccm & 0x8) && (m360.scc1.scce & 0x8)) {
    m360.scc1.scce = 0x8;    /* ack RX event (write-1-to-clear per CPM
                                convention -- TODO confirm) */
    m360.scc1.sccm &= ~0x8;  /* mask further RX interrupts */
    scc_softc[0].rxInterrupts++;
    rtems_event_send (scc_softc[0].rxDaemonTid, INTERRUPT_EVENT);
  }

  /*
   * Buffer transmitted or transmitter error?
   */
  if ((m360.scc1.sccm & 0x12) && (m360.scc1.scce & 0x12)) {
    m360.scc1.scce = 0x12;    /* ack TX/TX-error events */
    m360.scc1.sccm &= ~0x12;  /* mask them until the tx daemon runs */
    scc_softc[0].txInterrupts++;
    rtems_event_send (scc_softc[0].txDaemonTid, INTERRUPT_EVENT);
  }
  m360.cisr = 1UL << 30;  /* Clear SCC1 interrupt-in-service bit */
}
void bfin_ethernet_txdma_isr(int vector) { struct bfin_ethernetSoftc *sc; void *txdmaBase; uint16_t status; int i; for (i = 0; i < N_BFIN_ETHERNET; i++) { sc = ðernetSoftc[i]; txdmaBase = sc->txdmaBase; status = BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET); if (status & DMA_IRQ_STATUS_DMA_DONE) rtems_event_send (sc->txDaemonTid, INTERRUPT_EVENT); BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET) = status; } }
/*
 * Re-arm the timer until the iteration budget is exhausted, then tell
 * the control task that the sequence is finished.
 */
static void reset_timer_or_finish(test_context *self, rtems_id timer)
{
  rtems_status_code status;
  int current = self->iteration_counter;

  if (current >= ITERATION_COUNT) {
    status = rtems_event_send(self->control_task, FINISH_EVENT);
  } else {
    self->iteration_counter = current + 1;
    status = rtems_timer_reset(timer);
  }

  rtems_test_assert(status == RTEMS_SUCCESSFUL);
}
/*
 * Prepend a node to the chain.  If the chain was empty beforehand,
 * notify the consumer task with the given event set so it can start
 * draining the chain.  Returns the status of the event send, or
 * RTEMS_SUCCESSFUL when no notification was needed.
 */
rtems_status_code rtems_chain_prepend_with_notification(
  rtems_chain_control *chain,
  rtems_chain_node *node,
  rtems_id task,
  rtems_event_set events
)
{
  bool chain_was_empty = rtems_chain_prepend_with_empty_check( chain, node );

  if ( !chain_was_empty ) {
    return RTEMS_SUCCESSFUL;
  }

  return rtems_event_send( task, events );
}
/*
 * Timer service routine: note whether the main task was caught inside
 * its blocking event receive, then send the event so the task can
 * continue either way.
 */
static rtems_timer_service_routine test_event_with_timeout_from_isr(
  rtems_id  timer,
  void     *arg
)
{
  rtems_status_code status;

  /*
   * We want to catch the task while it is blocking.  Otherwise just
   * send and make it happy.
   */
  if ( is_case_hit() )
    case_hit = true;

  status = rtems_event_send( main_task, 0x01 );
  fatal_directive_check_status_only( status, RTEMS_SUCCESSFUL, "event send" );
}
/*
 * Timer service routine: if the event synchronization state shows the
 * task is still inside the blocking operation, we have caught the
 * interesting case -- record it.  Either way, send the event so the
 * task can continue.
 */
rtems_timer_service_routine test_event_with_timeout_from_isr(
  rtems_id  timer,
  void     *arg
)
{
  rtems_status_code status;

  if ( _Event_Sync_state == THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED )
    case_hit = TRUE;

  status = rtems_event_send( main_task, 0x01 );
  fatal_directive_check_status_only( status, RTEMS_SUCCESSFUL, "event send" );
}
/*
 * Timer service routine: record the TSC delta since the previous
 * firing into the caller-supplied buffer.  While the buffer has room
 * the timer re-arms itself; once full it notifies the owning task and
 * destroys the timer.
 */
static rtems_timer_service_routine timerService(rtems_id id, void* arg)
{
  static int      sample;    /* index of next slot; persists across calls */
  static uint64_t previous;  /* TSC value captured at the last firing */
  uint64_t        current;
  TscTestData    *data = (TscTestData *)arg;

  rdtscll(current);
  data->buf[sample++] = (uint32_t)(current - previous);
  previous = current;

  if (sample < data->bufsize) {
    rtems_timer_reset(id);
  } else {
    rtems_event_send(data->taskID, RTEMS_EVENT_13);
    destroyTimer(id);
  }
}
/*
 * First subordinate task: waits, notifies task 2 via an event, then
 * repeatedly parks itself (another task is expected to resume it).
 */
static void subTask1 (rtems_task_argument arg)
{
  rtems_status_code status;

  rtems_task_wake_after (ticksPerSecond * 3);

  status = rtems_event_send (taskId2, 1);
  if (status != RTEMS_SUCCESSFUL) {
    printf ("subTask1 - Can't send event (%d)\n", status);
    rtems_task_suspend (RTEMS_SELF);
  }

  rtems_task_wake_after (ticksPerSecond * 3);
  printf ("subTask1 - Event sent\n");
  rtems_task_suspend (RTEMS_SELF);

  /* Resumed from outside */
  printf ("subTask1 - Back to task 1\n");
  rtems_task_wake_after (ticksPerSecond * 3);
  rtems_task_suspend (RTEMS_SELF);
}
/*
 * Timer service routine fired at time T4: releases whichever resource
 * the blocked test task is currently waiting on, so its obtain/receive
 * call returns.  Records that the release happened via
 * release_happened.
 */
static void release_callback(rtems_id timer_id, void *arg)
{
  rtems_status_code sc = RTEMS_SUCCESSFUL;
  char buf [1];
  size_t size = sizeof(buf);
  uint32_t released = 0;

  assert_time(T4);

  /* The obtain attempt and the interrupt must already have happened;
     none of the later milestones may have happened yet. */
  rtems_test_assert(
    obtain_try
      && interrupt_happened
      && !delayed_happened
      && !interrupt_triggered_happened
      && !server_triggered_happened
  );

  /* Release the resource matching the configured test scenario */
  switch (resource_type) {
    case SEMAPHORE:
      sc = rtems_semaphore_release(semaphore);
      break;
    case MUTEX:
      sc = rtems_semaphore_release(mutex);
      break;
    case MESSAGE_QUEUE:
      sc = rtems_message_queue_send(message_queue, buf, size);
      break;
    case EVENT:
      /* The event is sent to the Timer Server thread itself */
      sc = rtems_event_send(_Timer_server->thread->Object.id, RTEMS_EVENT_0);
      break;
    case BARRIER:
      sc = rtems_barrier_release(barrier, &released);
      break;
    case TASK_WAKE_AFTER:
      /* Nothing to release; the sleep simply expires */
      sc = RTEMS_SUCCESSFUL;
      break;
    default:
      rtems_test_assert(false);
      break;
  }

  directive_failed_with_level(sc, "release", 1);

  release_happened = true;
}
/*
 * Timer service routine: tries to expire the thread's timeout while it
 * is inside a blocking event receive, then verifies that a subsequent
 * event send does NOT overwrite the RTEMS_TIMEOUT status already
 * recorded for that wait.  Re-arms itself until the interesting
 * interleaving (ctx->hit) is observed by the caller.
 */
static void timeout_before_satisfied(rtems_id timer, void *arg)
{
  rtems_status_code sc;
  test_context *ctx = arg;
  Thread_Control *thread = ctx->thread;
  Thread_Wait_flags flags = _Thread_Wait_flags_get(thread);

  if (blocks_for_event(flags)) {
    /* Did we catch the thread inside the blocking operation itself? */
    ctx->hit = interrupts_blocking_op(flags);

    /* Receive buffer still holds the canary; wait not yet finished */
    rtems_test_assert(
      *(rtems_event_set *) thread->Wait.return_argument == DEADBEEF
    );
    rtems_test_assert(thread->Wait.return_code == RTEMS_SUCCESSFUL);

    /* Expire the watchdog: the wait is now a timeout */
    _Thread_Timeout(&thread->Timer.Watchdog);
    rtems_test_assert(
      *(rtems_event_set *) thread->Wait.return_argument == DEADBEEF
    );
    rtems_test_assert(thread->Wait.return_code == RTEMS_TIMEOUT);

    /* Sending the events now must not change the timeout outcome */
    sc = rtems_event_send(thread->Object.id, EVENTS);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(
      *(rtems_event_set *) thread->Wait.return_argument == DEADBEEF
    );
    rtems_test_assert(thread->Wait.return_code == RTEMS_TIMEOUT);

    if (ctx->hit) {
      rtems_test_assert(
        _Thread_Wait_flags_get(thread)
          == (THREAD_WAIT_CLASS_EVENT | THREAD_WAIT_STATE_READY_AGAIN)
      );
    }

    rtems_test_assert(thread->Wait.count == EVENTS);
  }

  sc = rtems_timer_reset(timer);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
/*
 * Timer service routine (older _Event_Sync_state based variant): fire
 * the event timeout while the thread is waiting, then verify that a
 * later event send cannot overwrite the RTEMS_TIMEOUT status.
 * Re-arms itself each invocation.
 */
static void timeout_before_satisfied(rtems_id timer, void *arg)
{
  rtems_status_code sc;
  test_context *ctx = arg;
  const Thread_Control *thread = ctx->thread;

  /* A non-zero Wait.count indicates the thread is waiting for events */
  if (thread->Wait.count != 0) {
    /* Did we interrupt the blocking operation itself? */
    ctx->hit =
      _Event_Sync_state == THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED;

    rtems_test_assert(thread->Wait.count == EVENTS);
    rtems_test_assert(
      *(rtems_event_set *) thread->Wait.return_argument == DEADBEEF
    );
    rtems_test_assert(thread->Wait.return_code == RTEMS_SUCCESSFUL);

    /* Expire the wait: count drops to zero, status becomes TIMEOUT */
    _Event_Timeout(thread->Object.id, &_Event_Sync_state);
    rtems_test_assert(thread->Wait.count == 0);
    rtems_test_assert(
      *(rtems_event_set *) thread->Wait.return_argument == DEADBEEF
    );
    rtems_test_assert(thread->Wait.return_code == RTEMS_TIMEOUT);

    /* Sending the events now must not change the timeout outcome */
    sc = rtems_event_send(thread->Object.id, EVENTS);
    rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    rtems_test_assert(thread->Wait.count == 0);
    rtems_test_assert(
      *(rtems_event_set *) thread->Wait.return_argument == DEADBEEF
    );
    rtems_test_assert(thread->Wait.return_code == RTEMS_TIMEOUT);

    if (ctx->hit) {
      rtems_test_assert(
        _Event_Sync_state == THREAD_BLOCKING_OPERATION_TIMEOUT
      );
    }
  }

  sc = rtems_timer_reset(timer);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
}
void destory_all_tasks( const char *who ) { uint32_t task; /* * If the id is not zero, signal the task to delete. */ for (task = 0; task < MAX_TASKS; task++) if (task_id[task]) { printf(" %s : signal task %08" PRIxrtems_id " to delete, ", who, task_id[task]); fflush(stdout); rtems_event_send(task_id[task], 1); task_id[task] = 0; } }
/*
 * WD interrupt handler
 *
 * Handles the WD8003 (NS DP8390 core) interrupt: recovers from a
 * receive-ring overwrite and wakes the receive daemon when a frame has
 * arrived.  The overwrite branch appears to follow the DP8390 overrun
 * recovery procedure (stop, wait, clear byte count, loopback, restart)
 * -- verify against the DP8390 datasheet.
 */
static void wd8003Enet_interrupt_handler (void *unused)
{
  unsigned int tport;
  unsigned char status, status2;

  tport = wd_softc[0].port ;

  /*
   * Read status
   */
  inport_byte(tport+ISR, status);
  /* Mask all 8390 interrupts.  NOTE(review): IMR is not restored in
     this handler -- presumably the daemon re-enables it; confirm. */
  outport_byte(tport+IMR, 0x00);

  /*
   * Ring overwrite
   */
  if (status & MSK_OVW){
    outport_byte(tport+CMDR, MSK_STP + MSK_RD2);	/* stop 8390 */
    Wait_X_ms(2);
    outport_byte(tport+RBCR0, 0);	/* clear byte count */
    outport_byte(tport+RBCR1, 0);
    inport_byte(tport+ISR, status2);
    status |= (status2 & (MSK_PTX+MSK_TXE)) ;	/* TX status */
    outport_byte(tport+TCR, MSK_LOOP);	/* loopback mode */
    outport_byte(tport+CMDR, MSK_STA + MSK_RD2);	/* start */
    overrun = 1 ;
    /* If no transmit completed/errored during the overrun, the frame
       in flight must be resent */
    if ((status & (MSK_PTX+MSK_TXE)) == 0)
      resend = 1;
  }

  /*
   * Frame received?
   */
  if (status & (MSK_PRX+MSK_RXE)) {
    /* Acknowledge only the RX status bits we are handling */
    outport_byte(tport+ISR, status & (MSK_PRX+MSK_RXE));
    wd_softc[0].rxInterrupts++;
    rtems_event_send (wd_softc[0].rxDaemonTid, INTERRUPT_EVENT);
  }
}
/*
 * Worker task: queries its own priority (exercising the scheduler
 * path), signals readiness to the first task when it is the second
 * worker (arg == 1), then busy-waits forever.
 */
static void task(rtems_task_argument arg)
{
  rtems_task_priority priority;
  rtems_status_code status;

  status = rtems_task_set_priority(
    RTEMS_SELF,
    RTEMS_CURRENT_PRIORITY,
    &priority
  );
  rtems_test_assert(status == RTEMS_SUCCESSFUL);

  if (arg == 1) {
    status = rtems_event_send(task_ids[0], SECOND_TASK_READY);
    rtems_test_assert(status == RTEMS_SUCCESSFUL);
  }

  while (true) {
    /* Spin forever */
  }
}
static void test_with_normal_and_system_event(void) { rtems_status_code sc; rtems_event_set out; /* Assert no events pending */ sc = rtems_event_receive(EVENT, RTEMS_NO_WAIT, 0, &out); rtems_test_assert(sc == RTEMS_UNSATISFIED); sc = rtems_event_system_receive(EVENT, RTEMS_NO_WAIT, 0, &out); rtems_test_assert(sc == RTEMS_UNSATISFIED); /* Send system event */ sc = rtems_event_system_send(rtems_task_self(), EVENT); rtems_test_assert(sc == RTEMS_SUCCESSFUL); sc = rtems_event_receive(EVENT, RTEMS_NO_WAIT, 0, &out); rtems_test_assert(sc == RTEMS_UNSATISFIED); out = 0; sc = rtems_event_system_receive(EVENT, RTEMS_NO_WAIT, 0, &out); rtems_test_assert(sc == RTEMS_SUCCESSFUL); rtems_test_assert(out == EVENT); /* Send normal event */ sc = rtems_event_send(rtems_task_self(), EVENT); rtems_test_assert(sc == RTEMS_SUCCESSFUL); out = 0; sc = rtems_event_receive(EVENT, RTEMS_NO_WAIT, 0, &out); rtems_test_assert(sc == RTEMS_SUCCESSFUL); rtems_test_assert(out == EVENT); sc = rtems_event_system_receive(EVENT, RTEMS_NO_WAIT, 0, &out); rtems_test_assert(sc == RTEMS_UNSATISFIED); }
/*
 * Line specific (tty) read routine.
 *
 * Copies one queued input packet (an mbuf chain) from the PPP softc
 * input queue into the caller's buffer.  Returns RTEMS_UNSATISFIED
 * when the tty is not attached to this softc / wrong discipline, or
 * when no packet is queued; RTEMS_SUCCESSFUL otherwise with
 * rw_args->bytes_moved set to the copied length.  If more packets
 * remain queued afterwards, an event is posted back to the pppd task
 * so it will call read again.
 */
int
pppread(struct rtems_termios_tty *tty, rtems_libio_rw_args_t *rw_args)
{
  rtems_status_code status = RTEMS_UNSATISFIED;
  int count = 0;
  int maximum = rw_args->count;
  char *buffer = rw_args->buffer;
  register struct ppp_softc *sc = (struct ppp_softc *)tty->t_sc;
  struct mbuf *m;
  struct mbuf *m0;
  u_char *p;

  if (sc == NULL)
    return 0;

  /*
   * Loop waiting for input, checking that nothing disasterous
   * happens in the meantime.
   */
  if (tty != (struct rtems_termios_tty *)sc->sc_devp ||
      tty->t_line != PPPDISC) {
    return ( status );
  }

  /* Nothing queued -> unsatisfied */
  if (sc->sc_inq.ifq_head == NULL) {
    return ( status );
  }

  /* Get the packet from the input queue */
  rtems_bsdnet_semaphore_obtain();
  IF_DEQUEUE(&sc->sc_inq, m0);

  /* loop over mbuf chain */
  m = m0;
  while (( m != NULL ) && ( m->m_len > 0 ) && ( count+m->m_len < maximum )) {
    /* copy data into buffer, then scrub the mbuf contents
       (presumably so stale packet data cannot leak -- verify) */
    p = mtod(m, u_char *);
    memcpy(buffer, p, m->m_len);
    memset(p, 0, m->m_len);
    count += m->m_len;
    buffer += m->m_len;

    /* increment loop index */
    m = m->m_next;
  }

  /* free mbuf chain */
  m_freem(m0);
  rtems_bsdnet_semaphore_release();

  /* update return values */
  rw_args->bytes_moved = count;
  /* NOTE(review): count starts at 0 and only grows, so this condition
     is always true once we get here */
  if ( count >= 0 ) {
    status = RTEMS_SUCCESSFUL;
  }

  /* check to see if queue is empty */
  if (sc->sc_inq.ifq_head != NULL) {
    /* queue is not empty - post another event to ourself */
    rtems_event_send(sc->sc_pppdtask, PPPD_EVENT);
  }
  return ( status );
}
/*
 * Single interrupt handler for the Xilinx TEMAC: classifies the device
 * interrupt status (DISR), reports fatal bus/FIFO errors, and on
 * normal core interrupts wakes the RX/TX threads while masking their
 * interrupt sources (the threads presumably re-enable them -- verify).
 */
void xilTemacIsrSingle(struct XilTemac* xilTemac)
{
  uint32_t base = xilTemac->iAddr;
  uint32_t disr = IN32( base + XTE_DISR_OFFSET );
  struct ifnet* ifp = xilTemac->iIfp;

  if( disr && (ifp->if_flags & IFF_RUNNING) == 0 ) {
    /* some interrupt status bits are asserted but card is down */
    /* NOTE(review): the message text does not match the condition --
       this branch fires when disr != 0 AND the interface is down */
    printk("%s: Fatal error, disr 0 or this emac not running\n",
           DRIVER_PREFIX);
    /*assert(0);*/
  } else {
    /* Handle all error conditions first */
    if( disr & (XTE_DXR_DPTO_MASK | XTE_DXR_TERR_MASK |
                XTE_DXR_RECV_FIFO_MASK | XTE_DXR_SEND_FIFO_MASK) ) {
      printk("%s: Fatal Bus error, disr: %08x\n", DRIVER_PREFIX, disr);
      /*assert(0);*/
    }

    if( disr & XTE_DXR_CORE_MASK ) {
      /* Normal case, temac interrupt */
      uint32_t ipisr = IN32(base + XTE_IPISR_OFFSET);
      uint32_t ipier = IN32(base + XTE_IPIER_OFFSET);
      uint32_t newipier = ipier;   /* updated enable mask, written back
                                      only if it changed */
      uint32_t pending = ipisr & ipier;

      xilTemac->iStats.iInterrupts++;

      /* Check for all fatal errors, even if that error is not enabled
         in ipier */
      if(ipisr & XTE_IPXR_FIFO_FATAL_ERROR_MASK) {
        printk("%s: Fatal Fifo Error ipisr: %08x\n", DRIVER_PREFIX, ipisr);
        /*assert(0);*/
      }

      if(pending & XTE_IPXR_RECV_DONE_MASK) {
        /* We've received a packet
           - inc stats
           - disable rx interrupt
           - signal rx thread to empty out fifo
             (rx thread must renable interrupt)
         */
        xilTemac->iStats.iRxInterrupts++;
        newipier &= ~XTE_IPXR_RECV_DONE_MASK;
        rtems_event_send(gXilRxThread, xilTemac->iIoEvent);
      }

      if(pending & XTE_IPXR_XMIT_DONE_MASK) {
        /* We've transmitted a packet.  This interrupt is only ever
           enabled in the ipier if the tx thread didn't have enough
           space in the data fifo or the tplr fifo.  If that's the
           case, we:
           - inc stats
           - disable tx interrupt
           - signal tx thread that a transmit has completed and thus
             there is now room to send again.
         */
        xilTemac->iStats.iTxInterrupts++;
        newipier &= ~XTE_IPXR_XMIT_DONE_MASK;
        rtems_event_send(gXilTxThread, xilTemac->iIoEvent);
      }

      if(pending & XTE_IPXR_RECV_DROPPED_MASK) {
        /* A packet was dropped (because it was invalid, or receiving
           it have overflowed one of the rx fifo's).
           - Increment stats.
           - Clear interrupt condition.
         */
        uint32_t toggle = 0;

        if(pending & XTE_IPXR_RECV_REJECT_MASK) {
          xilTemac->iStats.iRxRejectedInvalidFrame++;
          toggle |= XTE_IPXR_RECV_REJECT_MASK;
        }
        if(pending & XTE_IPXR_RECV_PFIFO_ABORT_MASK) {
          xilTemac->iStats.iRxRejectedDataFifoFull++;
          toggle |= XTE_IPXR_RECV_PFIFO_ABORT_MASK;
        }
        if(pending & XTE_IPXR_RECV_LFIFO_ABORT_MASK) {
          xilTemac->iStats.iRxRejectedLengthFifoFull++;
          toggle |= XTE_IPXR_RECV_LFIFO_ABORT_MASK;
        }

        xilTemac->iStats.iRxRejectedInterrupts++;
        /* Write back the handled bits to clear them (presumably
           write-1-to-clear -- confirm against the TEMAC register spec) */
        OUT32(base + XTE_IPISR_OFFSET, toggle);
      }

      if(pending & XTE_IPXR_AUTO_NEG_MASK) {
        printk("%s: Autonegotiation finished\n", DRIVER_PREFIX);
        OUT32(base + XTE_IPISR_OFFSET, XTE_IPXR_AUTO_NEG_MASK);
      }

      if(newipier != ipier) {
        /* Some sources were masked above; commit the new enable mask */
        OUT32(base + XTE_IPIER_OFFSET, newipier);
      }
    }
  }
}
/*
 * TEST2: exhaust the task object pool, free two non-adjacent blocks of
 * task slots, then re-create that many tasks and verify every new id
 * matches one of the freed ids (i.e. indices are re-allocated, never
 * duplicated against a live task).  Exits the test on any failure.
 */
void test2()
{
  rtems_status_code result;
  uint32_t remove_task;
  uint32_t task;
  uint32_t block;
  uint32_t task_count = 0;
  /* ids of the tasks we delete, to be matched against re-created ones */
  rtems_id removed_ids[TASK_ALLOCATION_SIZE * 2];
  char c1 = 'a';
  char c2 = 'a';
  char c3 = '0';
  char c4 = '0';

  printf( "\n TEST2 : re-allocate of index numbers, and a block free'ed and one inactive\n" );

  /*
   * Allocate enought tasks so the Inactive list is empty. Remember
   * to count the Init task, ie ... - 1.
   */
  while (task_count < ((TASK_ALLOCATION_SIZE * 5) - TASK_INDEX_OFFSET))
  {
    rtems_name name;

    printf(" TEST2 : creating task '%c%c%c%c', ", c1, c2, c3, c4);
    name = rtems_build_name(c1, c2, c3, c4);
    result = rtems_task_create(name,
                               10,
                               RTEMS_MINIMUM_STACK_SIZE,
                               RTEMS_DEFAULT_ATTRIBUTES,
                               RTEMS_LOCAL,
                               &task_id[task_count]);
    if (status_code_bad(result))
      break;

    printf("number = %3" PRIi32 ", id = %08" PRIxrtems_id ", starting, ",
           task_count, task_id[task_count]);
    fflush(stdout);

    result = rtems_task_start(task_id[task_count],
                              test_task,
                              (rtems_task_argument) task_count);
    if (status_code_bad(result))
      break;

    /*
     * Update the name.
     */
    NEXT_TASK_NAME(c1, c2, c3, c4);
    task_count++;
  }

  /*
   * Take out the second and fourth allocation size block of tasks
   */
  if (task_count != ((TASK_ALLOCATION_SIZE * 5) - TASK_INDEX_OFFSET))
  {
    printf( " FAIL2 : not enough tasks created -\n"
            " task created = %" PRIi32 ", required number = %i\n",
            task_count, (TASK_ALLOCATION_SIZE * 5) - TASK_INDEX_OFFSET);
    destory_all_tasks("TEST2");
    exit( 1 );
  }

  /* Delete every task in blocks 1 and 3, remembering their ids */
  task = 0;
  for (block = 1; block < 4; block += 2)
  {
    for (remove_task = (block * TASK_ALLOCATION_SIZE) - TASK_INDEX_OFFSET;
         remove_task < (((block + 1) * TASK_ALLOCATION_SIZE) - TASK_INDEX_OFFSET);
         remove_task++)
    {
      if (!task_id[remove_task])
      {
        printf( " FAIL2 : remove task has a 0 id -\n"
                " task number = %" PRIi32 "\n",
                remove_task);
        destory_all_tasks("TEST2");
        exit( 1 );
      }

      /*
       * Save the id's to match them against the reallocated ids
       */
      removed_ids[task++] = task_id[remove_task];

      printf(" TEST2 : block %" PRIi32 " remove, signal task %08" PRIxrtems_id ", ",
             block, task_id[remove_task]);
      rtems_event_send(task_id[remove_task], 1);
      task_id[remove_task] = 0;
    }
  }

  /* Re-create as many tasks as were deleted */
  for (task = 0; task < (TASK_ALLOCATION_SIZE * 2); task++)
  {
    rtems_name name;
    uint32_t id_slot;

    /*
     * Find a free slot in the task id table.
     */
    for (id_slot = 0; id_slot < MAX_TASKS; id_slot++)
      if (!task_id[id_slot])
        break;

    if (id_slot == MAX_TASKS)
    {
      printf( " FAIL2 : no free task id slot.\n");
      destory_all_tasks("TEST2");
      exit( 1 );
    }

    printf(" TEST2 : creating task '%c%c%c%c', ", c1, c2, c3, c4);
    name = rtems_build_name(c1, c2, c3, c4);
    result = rtems_task_create(name,
                               10,
                               RTEMS_MINIMUM_STACK_SIZE,
                               RTEMS_DEFAULT_ATTRIBUTES,
                               RTEMS_LOCAL,
                               &task_id[id_slot]);
    if (status_code_bad(result))
    {
      printf( " FAIL2 : re-creating a task -\n"
              " task number = %" PRIi32 "\n",
              id_slot);
      destory_all_tasks("TEST2");
      exit( 1 );
    }

    printf("number = %3" PRIi32 ", id = %08" PRIxrtems_id ", starting, ",
           task_count, task_id[id_slot]);

    result = rtems_task_start(task_id[id_slot],
                              test_task,
                              (rtems_task_argument) task_count);
    if (status_code_bad(result))
    {
      printf( " FAIL : re-starting a task -\n"
              " task number = %" PRIi32 "\n",
              id_slot);
      destory_all_tasks("TEST2");
      exit( 1 );
    }

    /*
     * Update the name.
     */
    NEXT_TASK_NAME(c1, c2, c3, c4);

    /*
     * Search the removed ids to see if it existed, clear the removed id when found
     */
    for (remove_task = 0; remove_task < (TASK_ALLOCATION_SIZE * 2); remove_task++)
      if (removed_ids[remove_task] == task_id[id_slot])
      {
        removed_ids[remove_task] = 0;
        break;
      }

    /*
     * If not located in the removed id table, check and make sure it is not
     * already allocated
     */
    if (remove_task == (TASK_ALLOCATION_SIZE * 2))
    {
      uint32_t allocated_id;

      for (allocated_id = 0; allocated_id < MAX_TASKS; allocated_id++)
        if ((task_id[id_slot] == task_id[allocated_id]) &&
            (id_slot != allocated_id))
        {
          printf( " FAIL2 : the new id is the same as an id already allocated -\n"
                  " task id = %08" PRIxrtems_id "\n",
                  task_id[id_slot]);
          exit( 1 );
        }

      printf( " FAIL2 : could not find the task id in the removed table -\n"
              " task id = %08" PRIxrtems_id "\n",
              task_id[id_slot]);
      exit( 1 );
    }

    task_count++;
  }

  destory_all_tasks("TEST2");

  printf( " TEST2 : completed\n" );
}
rtems_task Task_1( rtems_task_argument argument ) { rtems_name name RTEMS_GCC_NOWARN_UNUSED; uint32_t index RTEMS_GCC_NOWARN_UNUSED; rtems_id id RTEMS_GCC_NOWARN_UNUSED; rtems_task_priority in_priority RTEMS_GCC_NOWARN_UNUSED; rtems_task_priority out_priority RTEMS_GCC_NOWARN_UNUSED; rtems_mode in_mode RTEMS_GCC_NOWARN_UNUSED; rtems_mode mask RTEMS_GCC_NOWARN_UNUSED; rtems_mode out_mode RTEMS_GCC_NOWARN_UNUSED; rtems_time_of_day time RTEMS_GCC_NOWARN_UNUSED; rtems_interval timeout RTEMS_GCC_NOWARN_UNUSED; rtems_signal_set signals RTEMS_GCC_NOWARN_UNUSED; void *address_1 RTEMS_GCC_NOWARN_UNUSED; rtems_event_set events RTEMS_GCC_NOWARN_UNUSED; long buffer[ 4 ] RTEMS_GCC_NOWARN_UNUSED; uint32_t count RTEMS_GCC_NOWARN_UNUSED; rtems_device_major_number major RTEMS_GCC_NOWARN_UNUSED; rtems_device_minor_number minor RTEMS_GCC_NOWARN_UNUSED; uint32_t io_result RTEMS_GCC_NOWARN_UNUSED; uint32_t error RTEMS_GCC_NOWARN_UNUSED; rtems_clock_get_options options RTEMS_GCC_NOWARN_UNUSED; name = rtems_build_name( 'N', 'A', 'M', 'E' ); in_priority = 250; in_mode = RTEMS_NO_PREEMPT; mask = RTEMS_PREEMPT_MASK; timeout = 100; signals = RTEMS_SIGNAL_1 | RTEMS_SIGNAL_3; major = 10; minor = 0; error = 100; options = 0; /* rtems_shutdown_executive */ benchmark_timer_initialize(); for ( index=1 ; index <= OPERATION_COUNT ; index++ ) (void) rtems_shutdown_executive( error ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_shutdown_executive", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_task_create */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_task_create( name, in_priority, RTEMS_MINIMUM_STACK_SIZE, RTEMS_DEFAULT_MODES, RTEMS_DEFAULT_ATTRIBUTES, &id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_task_create", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_task_ident */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_task_ident( name, 
RTEMS_SEARCH_ALL_NODES, id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_task_ident", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_task_start */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_task_start( id, Task_1, 0 ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_task_start", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_task_restart */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_task_restart( id, 0 ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_task_restart", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_task_delete */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_task_delete( id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_task_delete", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_task_suspend */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_task_suspend( id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_task_suspend", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_task_resume */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_task_resume( id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_task_resume", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_task_set_priority */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_task_set_priority( id, in_priority, &out_priority ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_task_set_priority", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_task_mode */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_task_mode( in_mode, mask, &out_mode ); end_time = benchmark_timer_read(); put_time( "overhead: 
rtems_task_mode", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_task_wake_when */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_task_wake_when( time ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_task_wake_when", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_task_wake_after */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_task_wake_after( timeout ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_task_wake_after", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_interrupt_catch */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_interrupt_catch( Isr_handler, 5, address_1 ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_interrupt_catch", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_clock_get */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_clock_get( options, time ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_clock_get", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_clock_set */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_clock_set( time ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_clock_set", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_clock_tick */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_clock_tick(); end_time = benchmark_timer_read(); put_time( "overhead: rtems_clock_tick", end_time, OPERATION_COUNT, overhead, 0 ); rtems_test_pause(); /* rtems_timer_create */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_timer_create( name, &id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_timer_create", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_timer_delete */ 
benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_timer_delete( id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_timer_delete", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_timer_ident */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_timer_ident( name, id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_timer_ident", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_timer_fire_after */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_timer_fire_after( id, timeout, Timer_handler, NULL ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_timer_fire_after", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_timer_fire_when */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_timer_fire_when( id, time, Timer_handler, NULL ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_timer_fire_when", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_timer_reset */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_timer_reset( id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_timer_reset", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_timer_cancel */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_timer_cancel( id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_timer_cancel", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_semaphore_create */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_semaphore_create( name, 128, RTEMS_DEFAULT_ATTRIBUTES, RTEMS_NO_PRIORITY, &id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_semaphore_create", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_semaphore_delete */ 
benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_semaphore_delete( id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_semaphore_delete", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_semaphore_ident */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_semaphore_ident( name, RTEMS_SEARCH_ALL_NODES, id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_semaphore_ident", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_semaphore_obtain */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_semaphore_obtain( id, RTEMS_DEFAULT_OPTIONS, timeout ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_semaphore_obtain", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_semaphore_release */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_semaphore_release( id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_semaphore_release", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_message_queue_create */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_message_queue_create( name, 128, RTEMS_DEFAULT_ATTRIBUTES, &id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_message_queue_create", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_message_queue_ident */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_message_queue_ident( name, RTEMS_SEARCH_ALL_NODES, id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_message_queue_ident", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_message_queue_delete */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_message_queue_delete( id ); end_time = benchmark_timer_read(); put_time( "overhead: 
rtems_message_queue_delete", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_message_queue_send */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_message_queue_send( id, (long (*)[4])buffer ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_message_queue_send", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_message_queue_urgent */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_message_queue_urgent( id, (long (*)[4])buffer ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_message_queue_urgent", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_message_queue_broadcast */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_message_queue_broadcast( id, (long (*)[4])buffer, &count ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_message_queue_broadcast", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_message_queue_receive */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_message_queue_receive( id, (long (*)[4])buffer, RTEMS_DEFAULT_OPTIONS, timeout ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_message_queue_receive", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_message_queue_flush */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_message_queue_flush( id, &count ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_message_queue_flush", end_time, OPERATION_COUNT, overhead, 0 ); rtems_test_pause(); /* rtems_event_send */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_event_send( id, events ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_event_send", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_event_receive */ benchmark_timer_initialize(); for ( index = 1 ; 
index <= OPERATION_COUNT ; index ++ ) (void) rtems_event_receive( RTEMS_EVENT_16, RTEMS_DEFAULT_OPTIONS, timeout, &events ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_event_receive", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_signal_catch */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_signal_catch( Asr_handler, RTEMS_DEFAULT_MODES ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_signal_catch", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_signal_send */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_signal_send( id, signals ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_signal_send", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_partition_create */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_partition_create( name, Memory_area, 2048, 128, RTEMS_DEFAULT_ATTRIBUTES, &id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_partition_create", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_partition_ident */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_partition_ident( name, RTEMS_SEARCH_ALL_NODES, id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_partition_ident", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_partition_delete */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_partition_delete( id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_partition_delete", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_partition_get_buffer */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_partition_get_buffer( id, address_1 ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_partition_get_buffer", end_time, OPERATION_COUNT, 
overhead, 0 ); /* rtems_partition_return_buffer */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_partition_return_buffer( id, address_1 ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_partition_return_buffer", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_region_create */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_region_create( name, Memory_area, 2048, 128, RTEMS_DEFAULT_ATTRIBUTES, &id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_region_create", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_region_ident */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_region_ident( name, id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_region_ident", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_region_delete */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_region_delete( id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_region_delete", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_region_get_segment */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_region_get_segment( id, 243, RTEMS_DEFAULT_OPTIONS, timeout, &address_1 ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_region_get_segment", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_region_return_segment */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_region_return_segment( id, address_1 ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_region_return_segment", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_port_create */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_port_create( name, Internal_port_area, External_port_area, 0xff, 
&id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_port_create", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_port_ident */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_port_ident( name, id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_port_ident", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_port_delete */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_port_delete( id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_port_delete", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_port_external_to_internal */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_port_external_to_internal( id, &External_port_area[ 7 ], address_1 ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_port_external_to_internal", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_port_internal_to_external */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_port_internal_to_external( id, &Internal_port_area[ 7 ], address_1 ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_port_internal_to_external", end_time, OPERATION_COUNT, overhead, 0 ); rtems_test_pause(); /* rtems_io_initialize */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_io_initialize( major, minor, address_1, &io_result ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_io_initialize", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_io_open */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_io_open( major, minor, address_1, &io_result ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_io_open", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_io_close */ benchmark_timer_initialize(); for ( index = 1 ; 
index <= OPERATION_COUNT ; index ++ ) (void) rtems_io_close( major, minor, address_1, &io_result ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_io_close", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_io_read */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_io_read( major, minor, address_1, &io_result ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_io_read", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_io_write */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_io_write( major, minor, address_1, &io_result ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_io_write", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_io_control */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_io_control( major, minor, address_1, &io_result ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_io_control", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_fatal_error_occurred */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_fatal_error_occurred( error ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_fatal_error_occurred", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_rate_monotonic_create */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_rate_monotonic_create( name, &id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_rate_monotonic_create", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_rate_monotonic_ident */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_rate_monotonic_ident( name, id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_rate_monotonic_ident", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_rate_monotonic_delete */ 
benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_rate_monotonic_delete( id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_rate_monotonic_delete", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_rate_monotonic_cancel */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_rate_monotonic_cancel( id ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_rate_monotonic_cancel", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_rate_monotonic_period */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_rate_monotonic_period( id, timeout ); end_time = benchmark_timer_read(); put_time( "overhead: rtems_rate_monotonic_period", end_time, OPERATION_COUNT, overhead, 0 ); /* rtems_multiprocessing_announce */ benchmark_timer_initialize(); for ( index = 1 ; index <= OPERATION_COUNT ; index ++ ) (void) rtems_multiprocessing_announce(); end_time = benchmark_timer_read(); put_time( "overhead: rtems_multiprocessing_announce", end_time, OPERATION_COUNT, overhead, 0 ); TEST_END(); rtems_test_exit( 0 ); }
/*
 * Close a termios device.
 *
 * Decrements the tty reference count under the global termios mutex.
 * When the last reference is dropped: runs the line-discipline close
 * (or drains the output buffer), terminates task-driven I/O daemons,
 * calls the device's lastClose hook, unlinks the tty from the global
 * doubly linked tty list, and frees all semaphores and buffers.
 *
 * arg is a rtems_libio_open_close_args_t*; the tty was stashed in
 * args->iop->data1 at open time.  Always returns RTEMS_SUCCESSFUL;
 * unrecoverable semaphore/event failures call rtems_fatal_error_occurred.
 */
rtems_status_code
rtems_termios_close (void *arg)
{
  rtems_libio_open_close_args_t *args = arg;
  struct rtems_termios_tty *tty = args->iop->data1;
  rtems_status_code sc;

  /* Serialize against concurrent open/close of any tty. */
  sc = rtems_semaphore_obtain (rtems_termios_ttyMutex, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  if (sc != RTEMS_SUCCESSFUL)
    rtems_fatal_error_occurred (sc);
  if (--tty->refcount == 0) {
    if (rtems_termios_linesw[tty->t_line].l_close != NULL) {
      /*
       * call discipline-specific close
       */
      sc = rtems_termios_linesw[tty->t_line].l_close(tty);
    }
    else {
      /*
       * default: just flush output buffer
       */
      sc = rtems_semaphore_obtain (tty->osem, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
      if (sc != RTEMS_SUCCESSFUL) {
        rtems_fatal_error_occurred (sc);
      }
      drainOutput (tty);
    }

    if (tty->device.outputUsesInterrupts == TERMIOS_TASK_DRIVEN) {
      /*
       * send "terminate" to I/O tasks
       */
      sc = rtems_event_send( tty->rxTaskId, TERMIOS_RX_TERMINATE_EVENT);
      if (sc != RTEMS_SUCCESSFUL)
        rtems_fatal_error_occurred (sc);
      sc = rtems_event_send( tty->txTaskId, TERMIOS_TX_TERMINATE_EVENT);
      if (sc != RTEMS_SUCCESSFUL)
        rtems_fatal_error_occurred (sc);
    }
    /* Give the driver a chance to shut the hardware down. */
    if (tty->device.lastClose)
      (*tty->device.lastClose)(tty->major, tty->minor, arg);

    /*
     * Unlink this tty from the doubly linked list of open ttys,
     * fixing up the global head/tail pointers at the ends.
     */
    if (tty->forw == NULL) {
      rtems_termios_ttyTail = tty->back;
      if ( rtems_termios_ttyTail != NULL ) {
        rtems_termios_ttyTail->forw = NULL;
      }
    }
    else {
      tty->forw->back = tty->back;
    }
    if (tty->back == NULL) {
      rtems_termios_ttyHead = tty->forw;
      if ( rtems_termios_ttyHead != NULL ) {
        rtems_termios_ttyHead->back = NULL;
      }
    }
    else {
      tty->back->forw = tty->forw;
    }

    /* Release all per-tty resources. */
    rtems_semaphore_delete (tty->isem);
    rtems_semaphore_delete (tty->osem);
    rtems_semaphore_delete (tty->rawOutBuf.Semaphore);
    /* The raw input semaphore only exists for interrupt/task driven input. */
    if ((tty->device.pollRead == NULL) ||
        (tty->device.outputUsesInterrupts == TERMIOS_TASK_DRIVEN))
      rtems_semaphore_delete (tty->rawInBuf.Semaphore);
    free (tty->rawInBuf.theBuf);
    free (tty->rawOutBuf.theBuf);
    free (tty->cbuf);
    free (tty);
  }
  rtems_semaphore_release (rtems_termios_ttyMutex);
  return RTEMS_SUCCESSFUL;
}
/*
 * Test_task: one instance runs on each node of a two node system.
 * Node 1 sends a rotating pattern of events to the task on the remote
 * node; node 2 receives them.  A timer service routine (Stop_Test_TSR)
 * sets Stop_Test after five seconds to end the exchange; node 2 then
 * verifies that a further receive correctly times out.
 */
rtems_task Test_task(
  rtems_task_argument argument
)
{
  rtems_status_code status;
  uint32_t count;
  uint32_t remote_node;
  rtems_id remote_tid;
  rtems_event_set event_out;
  rtems_event_set event_for_this_iteration;

  Stop_Test = FALSE;

  /* The partner task lives on the other node of the two node system. */
  remote_node = (Multiprocessing_configuration.node == 1) ? 2 : 1;
  puts_nocr( "Remote task's name is : " );
  put_name( Task_name[ remote_node ], TRUE );

  /* Poll until the remote task has been created and is visible. */
  puts( "Getting TID of remote task" );
  do {
    status = rtems_task_ident(
      Task_name[ remote_node ],
      RTEMS_SEARCH_ALL_NODES,
      &remote_tid
    );
  } while ( status != RTEMS_SUCCESSFUL );
  directive_failed( status, "rtems_task_ident FAILED!!" );

  if ( Multiprocessing_configuration.node == 1 )
    puts( "Sending events to remote task" );
  else
    puts( "Receiving events from remote task" );

  /* Arm the five second stop timer; the TSR sets Stop_Test. */
  status = rtems_timer_fire_after(
    Timer_id[ 1 ],
    5 * TICKS_PER_SECOND,
    Stop_Test_TSR,
    NULL
  );
  directive_failed( status, "rtems_timer_fire_after" );

  count = 0;
  for ( ; ; ) {
    if ( Stop_Test == TRUE )
      break;

    /* Cycle through the 32 single-bit event sets, one per iteration. */
    event_for_this_iteration = Event_set_table[ count % 32 ];

    if ( Multiprocessing_configuration.node == 1 ) {
      status = rtems_event_send( remote_tid, event_for_this_iteration );
      directive_failed( status, "rtems_event_send" );

      status = rtems_task_wake_after( 1 );
      directive_failed( status, "rtems_task_wake_after" );
    } else {
      status = rtems_event_receive(
        event_for_this_iteration,
        RTEMS_DEFAULT_OPTIONS,
        1 * TICKS_PER_SECOND,
        &event_out
      );
      if ( rtems_are_statuses_equal( status, RTEMS_TIMEOUT ) ) {
        /* A timeout here normally means the other node has exited. */
        if ( Multiprocessing_configuration.node == 2 )
          puts( "\nCorrect behavior if the other node exitted." );
        else
          puts( "\nERROR... node 1 died" );
        break;
      } else
        directive_failed( status, "rtems_event_receive" );
    }

    /* Progress indicator: one dot per DOT_COUNT iterations. */
    if ( (count % DOT_COUNT) == 0 )
      put_dot('.');

    count++;
  }

  putchar( '\n' );

  if ( Multiprocessing_configuration.node == 2 ) {
    /* Node 1 has stopped sending, so this receive must time out. */
    status = rtems_event_receive(
      RTEMS_EVENT_16,
      RTEMS_DEFAULT_OPTIONS,
      1 * TICKS_PER_SECOND,
      &event_out
    );
    fatal_directive_status( status, RTEMS_TIMEOUT, "rtems_event_receive" );
    puts( "rtems_event_receive - correctly returned RTEMS_TIMEOUT" );
  }

  puts( "*** END OF TEST 6 ***" );
  rtems_test_exit( 0 );
}
/*PAGE
 *
 * daemon
 *
 * This task runs forever.  It waits for service requests on the FTP port
 * (port 21 by default).  When a request is received, it opens a new session
 * to handle those requests until the connection is closed.
 *
 * Input parameters:
 *   NONE
 *
 * Output parameters:
 *   NONE
 */
static void
daemon(rtems_task_argument args __attribute__((unused)))
{
  int                 s;
  socklen_t           addrLen;
  struct sockaddr_in  addr;
  FTPD_SessionInfo_t  *info = NULL;

  s = socket(PF_INET, SOCK_STREAM, 0);
  if (s < 0)
    syslog(LOG_ERR, "ftpd: Error creating socket: %s", serr());

  /* Listen on the configured control port on all interfaces. */
  addr.sin_family = AF_INET;
  addr.sin_port = htons(rtems_ftpd_configuration.port);
  addr.sin_addr.s_addr = htonl(INADDR_ANY);
  memset(addr.sin_zero, 0, sizeof(addr.sin_zero));

  if (0 > bind(s, (struct sockaddr *)&addr, sizeof(addr)))
    syslog(LOG_ERR, "ftpd: Error binding control socket: %s", serr());
  else if (0 > listen(s, 1))
    syslog(LOG_ERR, "ftpd: Error listening on control socket: %s", serr());
  else
    while (1)
    {
      int ss;
      addrLen = sizeof(addr);
      ss = accept(s, (struct sockaddr *)&addr, &addrLen);
      if (0 > ss)
        syslog(LOG_ERR, "ftpd: Error accepting control connection: %s", serr());
      else if(!set_socket_timeout(ss, ftpd_timeout))
        close_socket(ss);
      else
      {
        /* Grab a session worker from the pre-created task pool. */
        info = task_pool_obtain();
        if (NULL == info)
        {
          /* No free session task: refuse the connection. */
          close_socket(ss);
        }
        else
        {
          info->ctrl_socket = ss;
          if ((info->ctrl_fp = fdopen(info->ctrl_socket, "r+")) == NULL)
          {
            syslog(LOG_ERR, "ftpd: fdopen() on socket failed: %s", serr());
            close_stream(info);
            task_pool_release(info);
          }
          else
          {
            /* Initialize corresponding SessionInfo structure */
            info->def_addr = addr;
            if(0 > getsockname(ss, (struct sockaddr *)&addr, &addrLen))
            {
              syslog(LOG_ERR, "ftpd: getsockname(): %s", serr());
              close_stream(info);
              task_pool_release(info);
            }
            else
            {
              /*
               * Default data connection settings: active mode, ASCII
               * transfers, data port = control port - 1 (per RFC 959).
               */
              info->use_default = 1;
              info->ctrl_addr  = addr;
              info->pasv_socket = -1;
              info->data_socket = -1;
              info->xfer_mode   = TYPE_A;
              info->data_addr.sin_port =
                htons(ntohs(info->ctrl_addr.sin_port) - 1);
              info->idle = ftpd_timeout;
              /* Wakeup the session task.  The task will call
                 task_pool_release after it closes connection. */
              rtems_event_send(info->tid, FTPD_RTEMS_EVENT);
            }
          }
        }
      }
    }
  rtems_task_delete(RTEMS_SELF);
}
void Screen4() { rtems_event_set event_out; rtems_time_of_day time; struct timeval tv; time_t seconds; rtems_status_code status; status = rtems_event_receive( RTEMS_EVENT_16, RTEMS_NO_WAIT, RTEMS_NO_TIMEOUT, NULL ); fatal_directive_status( status, RTEMS_INVALID_ADDRESS, "rtems_event_receive NULL param" ); puts( "TA1 - rtems_event_receive - NULL param - RTEMS_INVALID_ADDRESS" ); status = rtems_event_receive( RTEMS_EVENT_16, RTEMS_NO_WAIT, RTEMS_NO_TIMEOUT, &event_out ); fatal_directive_status( status, RTEMS_UNSATISFIED, "rtems_event_receive unsatisfied (ALL)" ); puts( "TA1 - rtems_event_receive - RTEMS_UNSATISFIED ( all conditions )" ); status = rtems_event_receive( RTEMS_EVENT_16, RTEMS_EVENT_ANY | RTEMS_NO_WAIT, RTEMS_NO_TIMEOUT, &event_out ); fatal_directive_status( status, RTEMS_UNSATISFIED, "rtems_event_receive unsatisfied (ANY)" ); puts( "TA1 - rtems_event_receive - RTEMS_UNSATISFIED ( any condition )" ); puts( "TA1 - rtems_event_receive - timeout in 3 seconds" ); status = rtems_event_receive( RTEMS_EVENT_16, RTEMS_DEFAULT_OPTIONS, 3 * rtems_clock_get_ticks_per_second(), &event_out ); fatal_directive_status( status, RTEMS_TIMEOUT, "rtems_event_receive" ); puts( "TA1 - rtems_event_receive - woke up with RTEMS_TIMEOUT" ); status = rtems_event_send( 100, RTEMS_EVENT_16 ); fatal_directive_status( status, RTEMS_INVALID_ID, "rtems_event_send with illegal id" ); puts( "TA1 - rtems_event_send - RTEMS_INVALID_ID" ); puts( "TA1 - rtems_task_wake_after - sleep 1 second - RTEMS_SUCCESSFUL" ); status = rtems_task_wake_after( rtems_clock_get_ticks_per_second() ); directive_failed( status, "rtems_task_wake_after" ); build_time( &time, 2, 5, 1988, 8, 30, 45, 0 ); print_time( "TA1 - rtems_clock_set - ", &time, "" ); status = rtems_clock_set( &time ); directive_failed( status, "rtems_clock_set" ); puts( " - RTEMS_SUCCESSFUL" ); status = rtems_clock_get_tod_timeval( &tv ); directive_failed( status, "clock_get_tod_timeval OK" ); seconds = tv.tv_sec; printf( "TA1 - current time - 
%s\n", ctime(&seconds) ); }
/*
 * Low_task: lower priority half of the event benchmark.  Runs once the
 * high priority task has blocked in rtems_event_receive (the benchmark
 * timer was started there), so the first reading below measures the
 * blocking receive path.  The exact statement order IS the benchmark;
 * do not reorder the timer_initialize/timer_read pairs.
 */
rtems_task Low_task(
  rtems_task_argument argument
)
{
  uint32_t  index;
  rtems_event_set event_out;

  /* Close out the "caller blocks" measurement started by the high task. */
  end_time = benchmark_timer_read();

  put_time(
    "rtems_event_receive: not available caller blocks",
    end_time,
    OPERATION_COUNT,
    0,
    CALLING_OVERHEAD_EVENT_RECEIVE
  );

  /* Measure the empty-loop overhead so it can be subtracted below. */
  benchmark_timer_initialize();
    for ( index=1 ; index <= OPERATION_COUNT ; index++ )
      (void) benchmark_timer_empty_function();
  overhead = benchmark_timer_read();

  /* Send to self with no receiver waiting: nothing is readied. */
  benchmark_timer_initialize();
    for ( index=1 ; index <= OPERATION_COUNT ; index++ )
      (void) rtems_event_send( RTEMS_SELF, RTEMS_EVENT_16 );
  end_time = benchmark_timer_read();

  put_time(
    "rtems_event_send: no task readied",
    end_time,
    OPERATION_COUNT,
    overhead,
    CALLING_OVERHEAD_EVENT_SEND
  );

  /* The event is already pending, so this receive returns immediately. */
  benchmark_timer_initialize();
    (void) rtems_event_receive(
      RTEMS_EVENT_16,
      RTEMS_DEFAULT_OPTIONS,
      RTEMS_NO_TIMEOUT,
      &event_out
    );
  end_time = benchmark_timer_read();

  put_time(
    "rtems_event_receive: available",
    end_time,
    1,
    0,
    CALLING_OVERHEAD_EVENT_RECEIVE
  );

  /* Each send readies a blocked (higher index) task but does not preempt. */
  benchmark_timer_initialize();
    for ( index=1 ; index <= OPERATION_COUNT ; index++ )
      (void) rtems_event_send( Task_id[ index ], RTEMS_EVENT_16 );
  end_time = benchmark_timer_read();

  put_time(
    "rtems_event_send: task readied returns to caller",
    end_time,
    OPERATION_COUNT,
    overhead,
    CALLING_OVERHEAD_EVENT_SEND
  );

  TEST_END();
  rtems_test_exit( 0 );
}
/*
 * Test_task: event throughput stress test between two nodes.  Node 1
 * floods the remote task with RTEMS_EVENT_16; the other node receives
 * them.  Stop_Test is set asynchronously (by a timer elsewhere in this
 * test) to end each phase; a dot is printed per EVENT_TASK_DOT_COUNT
 * operations as a progress indicator.
 */
rtems_task Test_task(
  rtems_task_argument argument
)
{
  rtems_status_code status;
  uint32_t    count;
  uint32_t    remote_node;
  rtems_id          remote_tid;
  rtems_event_set   event_out;

  remote_node = ((Multiprocessing_configuration.node == 1) ? 2 : 1);

  /* Give the other node time to come up before looking for its task. */
  puts( "About to go to sleep!" );
  status = rtems_task_wake_after( rtems_clock_get_ticks_per_second() );
  directive_failed( status, "rtems_task_wake_after" );
  puts( "Waking up!" );

  puts_nocr( "Remote task's name is : " );
  put_name( Task_name[ remote_node ], TRUE );

  /* Poll until the remote task is visible via global search. */
  puts( "Getting TID of remote task" );
  while ( FOREVER ) {
    status = rtems_task_ident(
      Task_name[ remote_node ],
      RTEMS_SEARCH_ALL_NODES,
      &remote_tid
    );
    if ( status == RTEMS_SUCCESSFUL )
      break;
    puts( "rtems_task_ident FAILED!!" );
    rtems_task_wake_after(2);
  }

  if ( Multiprocessing_configuration.node == 1 ) {
    /* Sender phase: node 1 blasts events until Stop_Test is raised. */
    puts( "Sending events to remote task" );
    while ( Stop_Test == false ) {
      for ( count=EVENT_TASK_DOT_COUNT; Stop_Test == false && count; count-- ) {
        status = rtems_event_send( remote_tid, RTEMS_EVENT_16 );
        directive_failed( status, "rtems_event_send" );
      }
      put_dot( 'e' );
    }
  }

  /* Receiver phase (node 1 falls through here after its send phase). */
  puts( "Receiving events from remote task" );
  while ( Stop_Test == false ) {
    for ( count=EVENT_TASK_DOT_COUNT ; Stop_Test == false && count ; count-- ) {
      status = rtems_event_receive(
        RTEMS_EVENT_16,
        RTEMS_DEFAULT_OPTIONS,
        RTEMS_NO_TIMEOUT,
        &event_out
      );
      directive_failed( status, "rtems_event_receive" );
    }
    put_dot( 'e' );
  }

  Exit_test();
}
/**
 * BDBuf send wait event.
 *
 * Sends RTEMS_EVENT_1 to the task identified by @a id and traces the
 * operation.
 *
 * @param task Name of the calling test task (trace output only).
 * @param msg  Description of why the event is being sent.
 * @param id   Id of the task to signal.
 * @return true if rtems_event_send returned RTEMS_SUCCESSFUL.
 */
static bool
bdbuf_send_watch_event (const char* task, const char* msg, rtems_id id)
{
  /*
   * Cast for printing: rtems_id is a fixed 32-bit type, which is not
   * guaranteed to match the "unsigned int" that %x expects on every
   * target; passing it uncast is undefined behavior where they differ.
   */
  bdbuf_test_printf ("%s: %s: %08x: ", task, msg, (unsigned) id);
  return bdbuf_test_print_sc (rtems_event_send (id, RTEMS_EVENT_1), true);
}
/*
 * Task_1: driver task for the event/timer test (test 11).  First
 * exchanges events with TA2, then exercises timer-driven event sends to
 * itself: fire-after, fire-when, reset and cancel, including setting the
 * clock forwards and backwards while timers are pending.  The literal
 * output strings are part of the expected test output; do not alter them.
 */
rtems_task Task_1(
  rtems_task_argument argument
)
{
  rtems_event_set   eventout;
  rtems_time_of_day time;
  rtems_status_code status;
  uint32_t    index;

  /* ---- Phase 1: event ping-pong with TA2. ---- */

  puts( "TA1 - rtems_event_send - send RTEMS_EVENT_16 to TA2" );
  status = rtems_event_send( Task_id[ 2 ], RTEMS_EVENT_16 );
  directive_failed( status, "rtems_event_send" );

  puts(
    "TA1 - rtems_event_receive - waiting forever on "
      "RTEMS_EVENT_14 and RTEMS_EVENT_15"
  );
  status = rtems_event_receive(
    RTEMS_EVENT_14 | RTEMS_EVENT_15,
    RTEMS_DEFAULT_OPTIONS,
    RTEMS_NO_TIMEOUT,
    &eventout
  );
  directive_failed( status, "rtems_event_receive" );
  printf(
    "TA1 - RTEMS_EVENT_14 and RTEMS_EVENT_15 received - "
      "eventout => %08" PRIxrtems_event_set "\n",
    eventout
  );

  puts( "TA1 - rtems_event_send - send RTEMS_EVENT_18 to TA2" );
  status = rtems_event_send( Task_id[ 2 ], RTEMS_EVENT_18 );
  directive_failed( status, "rtems_event_send" );

  /* TA2 sends EVENT_14 well within the 10 second window. */
  puts(
    "TA1 - rtems_event_receive - waiting with 10 second timeout "
      "on RTEMS_EVENT_14"
  );
  status = rtems_event_receive(
    RTEMS_EVENT_14,
    RTEMS_DEFAULT_OPTIONS,
    10 * rtems_clock_get_ticks_per_second(),
    &eventout
  );
  directive_failed( status, "rtems_event_receive" );
  printf(
    "TA1 - RTEMS_EVENT_14 received - eventout => "
      "%08" PRIxrtems_event_set "\n",
    eventout
  );

  puts( "TA1 - rtems_event_send - send RTEMS_EVENT_19 to TA2" );
  status = rtems_event_send( Task_id[ 2 ], RTEMS_EVENT_19 );
  directive_failed( status, "rtems_event_send" );

  status = rtems_clock_get_tod( &time );
  directive_failed( status, "rtems_clock_get_tod" );
  print_time( "TA1 - rtems_clock_get_tod - ", &time, "\n" );

  rtems_test_pause();

  /* ---- Phase 2: timer-driven sends to self. ---- */

  puts( "TA1 - rtems_event_send - send RTEMS_EVENT_18 to self after 5 seconds");
  status = rtems_timer_fire_after(
    Timer_id[ 1 ],
    5 * rtems_clock_get_ticks_per_second(),
    TA1_send_18_to_self_5_seconds,
    NULL
  );
  directive_failed( status, "rtems_timer_fire_after 5 seconds" );

  puts( "TA1 - rtems_event_receive - waiting forever on RTEMS_EVENT_18" );
  status = rtems_event_receive(
    RTEMS_EVENT_18,
    RTEMS_DEFAULT_OPTIONS,
    RTEMS_NO_TIMEOUT,
    &eventout
  );
  directive_failed( status, "rtems_event_receive of 18" );
  printf(
    "TA1 - RTEMS_EVENT_18 received - eventout => %08" PRIxrtems_event_set "\n",
    eventout
  );

  status = rtems_clock_get_tod( &time );
  directive_failed( status, "TA1 rtems_clock_get_tod" );
  print_time( "TA1 - rtems_clock_get_tod - ", &time, "\n" );

  /* Sending to self satisfies a later NO_WAIT receive immediately. */
  puts( "TA1 - rtems_event_send - send RTEMS_EVENT_3 to self" );
  status = rtems_event_send( RTEMS_SELF, RTEMS_EVENT_3 );
  directive_failed( status, "rtems_event_send" );

  puts(
    "TA1 - rtems_event_receive - RTEMS_EVENT_3 or "
      "RTEMS_EVENT_22 - NO_WAIT and ANY"
  );
  status = rtems_event_receive(
    RTEMS_EVENT_3 | RTEMS_EVENT_22,
    RTEMS_NO_WAIT | RTEMS_EVENT_ANY,
    RTEMS_NO_TIMEOUT,
    &eventout
  );
  directive_failed( status, "rtems_event_receive of 3 and 22" );
  printf(
    "TA1 - RTEMS_EVENT_3 received - eventout => %08" PRIxrtems_event_set "\n",
    eventout
  );

  puts( "TA1 - rtems_event_send - send RTEMS_EVENT_4 to self" );
  status = rtems_event_send( RTEMS_SELF, RTEMS_EVENT_4 );
  directive_failed( status, "rtems_event_send" );

  puts (
    "TA1 - rtems_event_receive - RTEMS_EVENT_4 or "
      "RTEMS_EVENT_5 - forever and ANY"
  );
  status = rtems_event_receive(
    RTEMS_EVENT_4 | RTEMS_EVENT_5,
    RTEMS_EVENT_ANY,
    RTEMS_NO_TIMEOUT,
    &eventout
  );
  directive_failed( status, "rtems_event_receive" );
  printf(
    "TA1 - RTEMS_EVENT_4 received - eventout => %08" PRIxrtems_event_set "\n",
    eventout
  );

  rtems_test_pause();

  /* ---- Phase 3: cancel before firing; multiple pending timers. ---- */

  puts( "TA1 - rtems_event_send - send RTEMS_EVENT_18 to self after 5 seconds");
  status = rtems_timer_fire_after(
    Timer_id[ 1 ],
    5 * rtems_clock_get_ticks_per_second(),
    TA1_send_18_to_self_5_seconds,
    NULL
  );
  directive_failed( status, "rtems_timer_fire_after 5 seconds" );

  puts( "TA1 - rtems_timer_cancel - cancelling timer for event RTEMS_EVENT_18");
  status = rtems_timer_cancel( Timer_id[ 1 ] );
  directive_failed( status, "rtems_timer_cancel" );

  puts( "TA1 - rtems_event_send - send RTEMS_EVENT_8 to self after 60 seconds");
  status = rtems_timer_fire_after(
    Timer_id[ 1 ],
    60 * rtems_clock_get_ticks_per_second(),
    TA1_send_8_to_self_60_seconds,
    NULL
  );
  directive_failed( status, "rtems_timer_fire_after 60 seconds" );

  puts( "TA1 - rtems_event_send - send RTEMS_EVENT_9 to self after 60 seconds");
  status = rtems_timer_fire_after(
    Timer_id[ 2 ],
    60 * rtems_clock_get_ticks_per_second(),
    TA1_send_9_to_self_60_seconds,
    NULL
  );
  directive_failed( status, "rtems_timer_fire_after 60 seconds" );

  puts(
    "TA1 - rtems_event_send - send RTEMS_EVENT_10 to self after 60 seconds"
  );
  status = rtems_timer_fire_after(
    Timer_id[ 3 ],
    60 * rtems_clock_get_ticks_per_second(),
    TA1_send_10_to_self,
    NULL
  );
  directive_failed( status, "rtems_timer_fire_after 60 seconds" );

  puts( "TA1 - rtems_timer_cancel - cancelling timer for event RTEMS_EVENT_8" );
  status = rtems_timer_cancel( Timer_id[ 1 ] );
  directive_failed( status, "rtems_timer_cancel" );

  /* ---- Phase 4: periodic timer with reset. ---- */

  build_time( &time, 2, 12, 1988, 8, 15, 0, 0 );
  print_time( "TA1 - rtems_clock_set - ", &time, "\n" );
  status = rtems_clock_set( &time );
  directive_failed( status, "rtems_clock_set" );

  puts( "TA1 - rtems_event_send - send RTEMS_EVENT_1 every second" );
  status = rtems_timer_fire_after(
    Timer_id[ 1 ],
    rtems_clock_get_ticks_per_second(),
    TA1_send_1_to_self_every_second,
    NULL
  );
  directive_failed( status, "rtems_timer_fire_after 1 second" );

  /* Receive three ticks of EVENT_1, resetting the timer between them. */
  for ( index = 0; index < 3; index++ ) {
    status = rtems_event_receive(
      RTEMS_EVENT_1,
      RTEMS_EVENT_ANY,
      RTEMS_NO_TIMEOUT,
      &eventout
    );
    directive_failed( status, "rtems_event_receive" );

    status = rtems_clock_get_tod( &time );
    directive_failed( status, "rtems_clock_get_tod" );

    printf(
      "TA1 - RTEMS_EVENT_1 received - eventout => %08"
        PRIxrtems_event_set " - ",
      eventout
    );
    print_time( "at ", &time, "\n" );

    if ( index < 2 ) {
      status = rtems_timer_reset( Timer_id[ 1 ] );
      directive_failed( status, "rtems_timer_reset" );
    };
  }

  puts( "TA1 - rtems_timer_cancel - cancelling timer for event RTEMS_EVENT_1" );
  status = rtems_timer_cancel( Timer_id[ 1 ] );
  directive_failed( status, "rtems_timer_cancel" );

  rtems_test_pause();

  /* ---- Phase 5: wall-clock (fire_when) timers. ---- */

  time.day = 13;
  puts( "TA1 - rtems_event_send - sending RTEMS_EVENT_11 to self in 1 day" );
  status = rtems_timer_fire_when(
    Timer_id[ 1 ], &time, TA1_send_11_to_self, NULL );
  directive_failed( status, "rtems_timer_fire_when 1 day" );

  time.hour = 7;
  puts( "TA1 - rtems_event_send - sending RTEMS_EVENT_11 to self in 1 day" );
  status = rtems_timer_fire_when(
    Timer_id[ 2 ], &time, TA1_send_11_to_self, NULL );
  directive_failed( status, "rtems_timer_fire_when 1 day" );

  time.hour = 8;       /* so code below has correct time/date */
  time.day = 14;
  puts( "TA1 - rtems_event_send - sending RTEMS_EVENT_11 to self in 2 days" );
  status = rtems_timer_fire_when(
    Timer_id[ 3 ], &time, TA1_send_11_to_self, NULL );
  directive_failed( status, "rtems_timer_fire_when 2 days" );

  puts("TA1 - rtems_timer_cancel - cancelling RTEMS_EVENT_11 to self in 1 day");
  status = rtems_timer_cancel( Timer_id[ 1 ] );
  directive_failed( status, "rtems_timer_cancel" );

  puts(
    "TA1 - rtems_timer_cancel - cancelling RTEMS_EVENT_11 to self in 2 days"
  );
  status = rtems_timer_cancel( Timer_id[ 3 ] );
  directive_failed( status, "rtems_timer_cancel" );

  puts( "TA1 - rtems_event_send - resending RTEMS_EVENT_11 to self in 2 days" );
  status = rtems_timer_fire_when(
    Timer_id[ 3 ], &time, TA1_send_11_to_self, NULL );
  directive_failed( status, "rtems_timer_fire_when 2 days" );

  /* Jump the clock past the fire time so Timer_id[2] and [3] go off. */
  time.day = 15;
  print_time( "TA1 - rtems_clock_set - ", &time, "\n" );
  status = rtems_clock_set( &time );
  directive_failed( status, "TA1 rtems_clock_set" );

  puts( "TA1 - rtems_event_receive - waiting forever on RTEMS_EVENT_11" );
  status = rtems_event_receive(
    RTEMS_EVENT_11,
    RTEMS_DEFAULT_OPTIONS,
    RTEMS_NO_TIMEOUT,
    &eventout
  );
  directive_failed( status, "rtems_event_receive" );
  printf(
    "TA1 - RTEMS_EVENT_11 received - eventout => %08" PRIxrtems_event_set "\n",
    eventout
  );

  rtems_test_pause();

  /* ---- Phase 6: short fire_after combined with a blocking receive. ---- */

  puts( "TA1 - rtems_event_send/rtems_event_receive combination" );
  status = rtems_timer_fire_after(
    Timer_id[ 1 ],
    10,
    TA1_send_11_to_self,
    NULL
  );
  directive_failed( status, "rtems_timer_fire_after 10 ticks" );

  status = rtems_event_receive(
    RTEMS_EVENT_11,
    RTEMS_DEFAULT_OPTIONS,
    RTEMS_NO_TIMEOUT,
    &eventout
  );
  directive_failed( status, "rtems_event_receive" );

  /* ---- Phase 7: clock set backwards/forwards across pending timers. ---- */

  build_time( &time, 2, 12, 1988, 8, 15, 0, 0 );
  print_time( "TA1 - rtems_clock_set - ", &time, "\n" );
  status = rtems_clock_set( &time );
  directive_failed( status, "rtems_clock_set" );

  time.day = 13;
  puts( "TA1 - rtems_event_receive all outstanding events" );
  status = rtems_event_receive(
    RTEMS_ALL_EVENTS,
    RTEMS_NO_WAIT | RTEMS_EVENT_ANY,
    0,
    &eventout
  );
  fatal_directive_status(
    status,
    RTEMS_UNSATISFIED,
    "rtems_event_receive all events"
  );

  puts( "TA1 - rtems_event_send - sending RTEMS_EVENT_10 to self in 1 day" );
  status = rtems_timer_fire_when(
    Timer_id[ 1 ], &time, TA1_send_10_to_self, NULL );
  directive_failed( status, "rtems_timer_fire_when 1 day" );

  time.day = 14;
  puts( "TA1 - rtems_event_send - sending RTEMS_EVENT_11 to self in 2 days" );
  status = rtems_timer_fire_when(
    Timer_id[ 2 ], &time, TA1_send_11_to_self, NULL );
  directive_failed( status, "rtems_timer_fire_when 2 days" );

  /* Setting the time backwards must not fire wall-clock timers. */
  build_time( &time, 2, 12, 1988, 7, 15, 0, 0 );
  print_time( "TA1 - rtems_clock_set - ", &time, "\n" );
  puts( "TA1 - set time backwards" );
  status = rtems_clock_set( &time );
  directive_failed( status, "rtems_clock_set" );

  status = rtems_event_receive(
    RTEMS_ALL_EVENTS,
    RTEMS_NO_WAIT | RTEMS_EVENT_ANY,
    RTEMS_NO_TIMEOUT,
    &eventout
  );
  if ( eventout )
    printf( "ERROR -0x%08" PRIxrtems_event_set " events received\n", eventout );
  else
    puts( "TA1 - no events received" );
  fatal_directive_status(
    status,
    RTEMS_UNSATISFIED,
    "rtems_event_receive all events"
  );

  /* Jump forwards past Timer_id[1] only; Timer_id[2] remains pending. */
  build_time( &time, 2, 14, 1988, 7, 15, 0, 0 );
  print_time( "TA1 - rtems_clock_set - ", &time, "\n" );
  puts( "TA1 - set time forwards (leave a timer)" );
  status = rtems_clock_set( &time );
  directive_failed( status, "rtems_clock_set" );

  status = rtems_event_receive(
    RTEMS_ALL_EVENTS,
    RTEMS_NO_WAIT | RTEMS_EVENT_ANY,
    RTEMS_NO_TIMEOUT,
    &eventout
  );
  if ( eventout == RTEMS_EVENT_10 )
    puts( "TA1 - RTEMS_EVENT_10 received" );
  else
    printf( "ERROR -0x%08" PRIxrtems_event_set " events received\n", eventout );
  directive_failed( status, "rtems_event_receive all events" );

  /* Re-arming the same timer replaces the earlier 100 tick interval. */
  puts( "TA1 - rtems_event_send - sending RTEMS_EVENT_11 to self in 100 ticks");
  status = rtems_timer_fire_after(
    Timer_id[ 1 ],
    100,
    TA1_send_11_to_self,
    NULL
  );
  directive_failed( status, "rtems_timer_fire_after 100 ticks" );

  puts( "TA1 - rtems_event_send - sending RTEMS_EVENT_11 to self in 200 ticks");
  status = rtems_timer_fire_after(
    Timer_id[ 1 ],
    200,
    TA1_send_11_to_self,
    NULL
  );
  directive_failed( status, "rtems_timer_fire_after 200 ticks" );

  /***** *****/

  /* An ALL receive with only part of the requested set is unsatisfied. */
  puts( "TA1 - rtems_event_send - send RTEMS_EVENT_4 to self" );
  status = rtems_event_send( RTEMS_SELF, RTEMS_EVENT_4 );
  directive_failed( status, "rtems_event_send" );

  eventout = 0;
  puts(
    "TA1 - rtems_event_receive - RTEMS_EVENT_4 AND RTEMS_EVENT_5 - UNSATISFIED"
  );
  status = rtems_event_receive(
    RTEMS_EVENT_4 | RTEMS_EVENT_5,
    RTEMS_NO_WAIT | RTEMS_EVENT_ALL,
    RTEMS_NO_TIMEOUT,
    &eventout
  );
  fatal_directive_status( status, RTEMS_UNSATISFIED, "rtems_event_receive" );

  /***** *****/

  puts( "*** END OF TEST 11 ***" );
  rtems_test_exit( 0 );
}
/*
 * Task_2: partner task to TA1 in the event test.  Receives events from
 * TA1 and replies, exercises a wall-clock (fire_when) timer send to
 * itself, queries RTEMS_PENDING_EVENTS, then deletes itself.
 *
 * Fix: rtems_event_set is a fixed 32-bit type, so passing it to a plain
 * %08x conversion is undefined where uint32_t != unsigned int; each
 * printf argument is now explicitly cast to unsigned (matching the
 * PRIxrtems_event_set usage in Task_1).
 */
rtems_task Task_2(
  rtems_task_argument argument
)
{
  rtems_event_set   eventout;
  rtems_time_of_day time;
  rtems_status_code status;

  /* Let TA1 run first and block in its receive. */
  status = rtems_task_wake_after( 1*TICKS_PER_SECOND );
  directive_failed( status, "rtems_task_wake_after" );

  puts( "TA2 - rtems_event_receive - waiting forever on RTEMS_EVENT_16" );
  status = rtems_event_receive(
    RTEMS_EVENT_16,
    RTEMS_DEFAULT_OPTIONS,
    RTEMS_NO_TIMEOUT,
    &eventout
  );
  directive_failed( status, "rtems_event_receive" );
  printf(
    "TA2 - RTEMS_EVENT_16 received - eventout => %08x\n",
    (unsigned) eventout
  );

  puts(
    "TA2 - rtems_event_send - send RTEMS_EVENT_14 and RTEMS_EVENT_15 to TA1"
  );
  status = rtems_event_send( Task_id[ 1 ], RTEMS_EVENT_14 | RTEMS_EVENT_15 );
  directive_failed( status, "rtems_event_send" );

  puts(
    "TA2 - rtems_event_receive - RTEMS_EVENT_17 or "
      "RTEMS_EVENT_18 - forever and ANY"
  );
  status = rtems_event_receive(
    RTEMS_EVENT_17 | RTEMS_EVENT_18,
    RTEMS_EVENT_ANY,
    RTEMS_NO_TIMEOUT,
    &eventout
  );
  directive_failed( status, "rtems_event_receive" );
  printf(
    "TA2 - RTEMS_EVENT_17 or RTEMS_EVENT_18 received - eventout => %08x\n",
    (unsigned) eventout
  );

  puts( "TA2 - rtems_event_send - send RTEMS_EVENT_14 to TA1" );
  status = rtems_event_send( Task_id[ 1 ], RTEMS_EVENT_14 );
  directive_failed( status, "rtems_event_send" );

  /* Arm a wall-clock timer four seconds past a freshly set TOD. */
  build_time( &time, 2, 12, 1988, 8, 15, 0, 0 );
  print_time( "TA2 - rtems_clock_set - ", &time, "\n" );
  status = rtems_clock_set( &time );
  directive_failed( status, "TA2 rtems_clock_set" );

  time.second += 4;
  puts(
    "TA2 - rtems_event_send - sending RTEMS_EVENT_10 to self after 4 seconds"
  );
  status = rtems_timer_fire_when(
    Timer_id[ 5 ],
    &time,
    TA2_send_10_to_self,
    NULL
  );
  directive_failed( status, "rtems_timer_fire_when after 4 seconds" );

  puts( "TA2 - rtems_event_receive - waiting forever on RTEMS_EVENT_10" );
  status = rtems_event_receive(
    RTEMS_EVENT_10,
    RTEMS_DEFAULT_OPTIONS,
    RTEMS_NO_TIMEOUT,
    &eventout
  );
  directive_failed( status, "rtems_event_receive" );

  status = rtems_clock_get( RTEMS_CLOCK_GET_TOD, &time );
  directive_failed( status, "rtems_clock_get" );
  printf(
    "TA2 - RTEMS_EVENT_10 received - eventout => %08x\n",
    (unsigned) eventout
  );
  print_time( "TA2 - rtems_clock_get - ", &time, "\n" );

  /* RTEMS_PENDING_EVENTS returns the pending set without consuming it. */
  puts( "TA2 - rtems_event_receive - RTEMS_PENDING_EVENTS" );
  status = rtems_event_receive(
    RTEMS_PENDING_EVENTS,
    RTEMS_DEFAULT_OPTIONS,
    RTEMS_NO_TIMEOUT,
    &eventout
  );
  directive_failed( status, "rtems_event_receive" );
  printf( "TA2 - eventout => %08x\n", (unsigned) eventout );

  puts( "TA2 - rtems_event_receive - RTEMS_EVENT_19 - RTEMS_NO_WAIT" );
  status = rtems_event_receive(
    RTEMS_EVENT_19,
    RTEMS_NO_WAIT,
    RTEMS_NO_TIMEOUT,
    &eventout
  );
  directive_failed( status, "rtems_event_receive" );
  printf(
    "TA2 - RTEMS_EVENT_19 received - eventout => %08x\n",
    (unsigned) eventout
  );

  puts( "TA2 - rtems_task_delete - deletes self" );
  status = rtems_task_delete( Task_id[ 2 ] );
  directive_failed( status, "rtems_task_delete of TA2" );
}
/*
 * Hook invoked when the PPP IP interface goes down: wake the pppdapp
 * task with RTEMS_EVENT_11 so it can react to the state change.
 */
static void pppdapp_ipdown_hook(void)
{
  /* send ip down signal to pppdapp task; the status is not checked */
  (void) rtems_event_send(pppdapp_taskid, RTEMS_EVENT_11);
}
/*
 * Signal the FEC driver daemon identified by "task" by sending it the
 * FEC_EVENT event.  The send status is intentionally ignored.
 */
static void fec_send_event(rtems_id task)
{
  (void) rtems_event_send(task, FEC_EVENT);
}