void init()
{
	signal(SIGUSR1, &signal1);
	printf("[Init][Pid: %d] Scheduler Testing Program\n", sched_getpid());
	sched_nice(-20);

	for (int i = 0; i < 15; i++) {
		if (sched_fork() == 0) {
			printf("[Child][Pid: %d] Parent Pid: %d\n", sched_getpid(), sched_getppid());
			sched_nice(-19 + i);

			struct timespec start, stop;
			clock_gettime(CLOCK_REALTIME, &start);
			for (long int j = 0; j < 1000000000; j++)
				;
			clock_gettime(CLOCK_REALTIME, &stop);

			printf("[Child][Pid: %d] Execution Complete. Took %ld Seconds.\n",
			       sched_getpid(), (long int)(stop.tv_sec - start.tv_sec));
			sched_exit(i);
			printf("[Child][Pid: %d] This Will Never Get Executed\n", sched_getpid());
		}
	}

	printf("[Init][Pid: %d] Process Information\n", sched_getpid());
	sched_ps();

	for (int i = 0; i < 15; i++) {
		int returncode;
		int cc = sched_wait(&returncode);
		printf("[Init][Pid: %d] Child Returned [%d] With Exit Code [%d]\n",
		       sched_getpid(), cc, returncode);
	}

	int returncode;
	int cc = sched_wait(&returncode);
	printf("[Init][Pid: %d] Calling Wait With No Children Returns [%d]\n", sched_getpid(), cc);

	sched_sleep(&wait1);

	for (int i = 0; i < 15; i++) {
		if (sched_fork() == 0) {
			printf("[Child][Pid: %d] Parent Pid: %d\n", sched_getpid(), sched_getppid());
			sched_nice(-19 + i);
			if (i % 2 == 1)
				sched_sleep(&wait1);
			else
				sched_sleep(&wait2);
			printf("[Child][Pid: %d] Execution Complete.\n", sched_getpid());
			sched_exit(i);
		}
	}

	for (int i = 0; i < 1000000000; i++)
		;

	printf("[Init][Pid: %d] Process Information\n", sched_getpid());
	sched_ps();

	printf("Wakeup 2\n");
	sched_wakeup(&wait2);
	printf("Wakeup 1\n");
	sched_wakeup(&wait1);

	for (int i = 0; i < 15; i++) {
		int returncode;
		int cc = sched_wait(&returncode);
		printf("[Init][Pid: %d] Child Returned [%d] With Exit Code [%d]\n",
		       sched_getpid(), cc, returncode);
	}

	printf("[Init][Pid: %d] Exiting Testing Program. Passing Control Back To Idle\n", sched_getpid());
	sched_exit(0);
}
/*
 * Process a write call on a tty device.
 */
static int
tty_write(file_t file, char *buf, size_t *nbyte, int blkno)
{
	struct tty *tp = file->priv;
	size_t remain, count = 0;
	unsigned char c;	/* must be char (not int) for BIG ENDIAN */

	DPRINTF(("tty_write\n"));

	remain = *nbyte;
	while (remain > 0) {
		if (tp->t_outq.tq_count > TTYQ_HIWAT) {
			tty_start(tp);
			if (tp->t_outq.tq_count <= TTYQ_HIWAT)
				continue;
			tp->t_state |= TS_ASLEEP;
			sched_sleep(&tp->t_output);
			continue;
		}
		if (umem_copyin(buf, &c, 1))
			return EFAULT;
		tty_output(c, tp);
		buf++;
		remain--;
		count++;
	}
	tty_start(tp);
	*nbyte = count;
	return 0;
}
int init_fn()
{
	int i, j, stat;

	printf("Created init function: ");
	printf("pid %d / niceval: %d\n", current->pid, current->stat_prio);

	sched_waitq_init(&wq1);
	sched_waitq_init(&wq2);

	// Parent process forks 9 CPU-bound processes
	for (i = 1; i < 10; i++) {
		switch (sched_fork()) {
		case -1:
			fprintf(stderr, "Fork failed in pid %d\n", current->pid);
			return -1;
		case 0:
			sched_nice(20 + i * 2);
			child_fn();
			sched_exit(0);
			break;
		}
	}

	// Parent process waits for a long time before it
	// does its own CPU-bound operations
	kill(getpid(), SIGABRT);
	for (;;) {
		for (i = 0; i < 40; i++)
			sched_sleep(&wq1);
		kill(getpid(), SIGABRT);
		waste_time(10);
		kill(getpid(), SIGABRT);
	}
}
void alt_init()
{
	RCC->AHB1ENR |= RCC_AHB1ENR_GPIOCEN;
	RCC->APB2ENR |= RCC_APB2ENR_SYSCFGEN;
	__DMB();

	// read EEPROM
	sched_sleep(10);
	i2c_shared_wait();
	i2c_shared_lock();
	i2c_polling_start(addr, false);
	i2c_polling_write(REG_AC1_MSB);
	i2c_polling_start(addr, true);
	for (int i = 0; i < 11; i++) {
		uint8_t msb = i2c_polling_read(true);
		uint8_t lsb = i2c_polling_read(i != 10);
		eeprom.vals[i] = (msb << 8) | lsb;
	}
	i2c_polling_stop();
	i2c_shared_unlock();

	// enable EXTI
	SYSCFG->EXTICR[1] |= SYSCFG_EXTICR2_EXTI5_PC;
	EXTI->RTSR |= (1 << PIN_EOC);
	EXTI->IMR |= (1 << PIN_EOC);
	util_enable_irq(EXTI9_5_IRQn, IRQ_PRI_HIGH);

	// jumpstart the async process
	if (!(GPIOC->IDR & (1 << PIN_EOC))) {
		i2c_shared_wait();
		i2c_shared_lock();
		i2c_async_send(addr, cmd_conv_temp, sizeof(cmd_conv_temp), i2c_shared_done_unlock);
	} else {
		irq_exti95();
	}
}
error_t barrier_wait(struct barrier_s *barrier)
{
	register uint_t event;
	register void *listner;
	register uint_t ticket;
	register uint_t index;
	register uint_t wqdbsz;
	register wqdb_t *wqdb;
	register struct thread_s *this;
	uint_t irq_state;
	uint_t tm_now;

	tm_now = cpu_time_stamp();
	this   = current_thread;
	index  = this->info.order;

	if((barrier->signature != BARRIER_ID) ||
	   ((barrier->owner != NULL) && (barrier->owner != this->task)))
		return EINVAL;

	wqdbsz = PMM_PAGE_SIZE / sizeof(wqdb_record_t);
	wqdb   = barrier->wqdb_tbl[index / wqdbsz];

#if !(CONFIG_USE_SCHED_LOCKS)
	event   = sched_event_make(this, SCHED_OP_WAKEUP);
	listner = sched_get_listner(this, SCHED_OP_WAKEUP);
	/* the event slot is only meaningful when scheduler events are used;
	 * storing it unconditionally would read an uninitialized 'event' */
	wqdb->tbl[index % wqdbsz].event = event;
#else
	listner = (void*)this;
#endif

	wqdb->tbl[index % wqdbsz].listner = listner;

#if CONFIG_BARRIER_ACTIVE_WAIT
	register uint_t current_phase;
	current_phase = barrier->phase;
#endif	/* CONFIG_BARRIER_ACTIVE_WAIT */

	cpu_disable_all_irq(&irq_state);
	ticket = arch_barrier_wait(barrier->cluster, barrier->hwid);
	cpu_restore_irq(irq_state);

	/* arch_barrier_wait() signals failure with a negative value;
	 * cast before the test since ticket is unsigned */
	if((sint_t)ticket < 0)
		return EINVAL;

	if(ticket == barrier->count)
		barrier->tm_first = tm_now;
	else if(ticket == 1)
		barrier->tm_last = tm_now;

#if CONFIG_BARRIER_ACTIVE_WAIT
	while(cpu_uncached_read(&barrier->state[current_phase]) == 0)
		sched_yield(this);
#else
	sched_sleep(this);
#endif	/* CONFIG_BARRIER_ACTIVE_WAIT */

	return (ticket == 1) ? PTHREAD_BARRIER_SERIAL_THREAD : 0;
}
/*
 * Read
 */
static int
keypad_read(file_t file, char *buf, size_t *nbyte, int blkno)
{
	int rc, c;
	size_t count;

	if (input_handler)
		return EBUSY;
	if (*nbyte == 0)
		return 0;

	if (keyq_empty()) {
		rc = sched_sleep(&keypad_event);
		if (rc == SLP_INTR)
			return EINTR;
	}
	for (count = 0; count < *nbyte; count++) {
		if (keyq_empty())
			break;
		c = keyq_dequeue();
		if (umem_copyout(&c, buf, 1))
			return EFAULT;
		buf++;
	}
	*nbyte = count;
	return 0;
}
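/*
 * Hedged companion sketch (not from the original driver): the other
 * half of the sched_sleep() handshake above. An interrupt handler
 * along these lines would buffer the key and wake any reader blocked
 * on keypad_event. keypad_scan(), keyq_full() and keyq_enqueue() are
 * assumptions for illustration; only keyq_empty()/keyq_dequeue() and
 * keypad_event appear in the source above.
 */
static int
keypad_isr(void *arg)
{
	int c = keypad_scan();		/* assumed: read scancode from hardware */

	if (!keyq_full())
		keyq_enqueue(c);	/* buffer the key for keypad_read() */

	/* Wake a thread sleeping in keypad_read(). */
	sched_wakeup(&keypad_event);
	return INT_DONE;
}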
/*
 * Timer thread.
 *
 * Handle all expired timers. Each callout routine is
 * called with scheduler locked and interrupts enabled.
 */
static void
timer_thread(void *dummy)
{
	struct timer *tmr;

	splhigh();
	for (;;) {
		/*
		 * Wait until next timer expiration.
		 */
		sched_sleep(&timer_event);

		while (!list_empty(&expire_list)) {
			/*
			 * callout
			 */
			tmr = timer_next(&expire_list);
			list_remove(&tmr->link);
			tmr->state = TM_STOP;
			sched_lock();
			spl0();
			(*tmr->func)(tmr->arg);

			/*
			 * Unlock scheduler here in order to give
			 * chance to higher priority threads to run.
			 */
			sched_unlock();
			splhigh();
		}
	}
	/* NOTREACHED */
}
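/*
 * Hedged usage sketch (assumed Prex-style callout API, not part of the
 * source above): a driver arms a one-shot callout; when it expires, the
 * tick handler moves it onto expire_list and wakes timer_event, and
 * timer_thread() above dispatches it. watchdog_* names are made up for
 * illustration.
 */
static struct timer watchdog_tmr;	/* hypothetical example timer */

static void
watchdog_fire(void *arg)
{
	/* Runs in timer_thread() context: scheduler locked, IRQs on. */
	DPRINTF(("watchdog expired\n"));
}

static void
watchdog_arm(void)
{
	/* Assumed signature: timer, delay in msec, function, argument. */
	timer_callout(&watchdog_tmr, 1000, &watchdog_fire, NULL);
}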
void basestation_func(void *unused)
{
	sched_sleep(5000); // give the XBee time to initialize

	while (true) {
		int start = sched_now();
		send_status_message();
		sched_sleep(50 - (sched_now() - start));

		if (xbee_receive_avail()) {
			char buf[128];
			XBeeReceiveHeader header;
			int got = xbee_receive(buf, header);

			if (buf[0] == MSGID_GAINS) {
				const GainsMessage &msg = *(GainsMessage *)buf;
				ControllerGains gains;
				gains.p = (1e-4f) * VectorF<3>{ (float)msg.roll_p, (float)msg.pitch_p, (float)msg.yaw_p };
				gains.d = (1e-4f) * VectorF<3>{ (float)msg.roll_d, (float)msg.pitch_d, (float)msg.yaw_d };
				controller_set_gains(gains);
			}
		}
	}
}
/*
 * Wait for output to drain.
 */
static void
tty_wait(struct tty *tp)
{
	/* DPRINTF(("tty_wait\n")); */

	if ((tp->t_outq.tq_count > 0) && tp->t_oproc) {
		tp->t_state |= TS_BUSY;
		while (1) {
			(*tp->t_oproc)(tp);
			if ((tp->t_state & TS_BUSY) == 0)
				break;
			tp->t_state |= TS_ASLEEP;
			sched_sleep(&tp->t_output);
		}
	}
}
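/*
 * Hedged companion sketch (modeled on the conventional tty_done()
 * completion hook; not confirmed by the source above): the interrupt
 * side of the handshake. When the transmitter drains, it clears
 * TS_BUSY and wakes any thread that set TS_ASLEEP before calling
 * sched_sleep() in tty_wait() or tty_write().
 */
void
tty_done(struct tty *tp)
{
	if ((tp->t_state & TS_BUSY) && !(tp->t_state & TS_TTSTOP))
		tp->t_state &= ~TS_BUSY;

	if (tp->t_state & TS_ASLEEP) {
		tp->t_state &= ~TS_ASLEEP;
		sched_wakeup(&tp->t_output);
	}
}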
void init_fn()
{
	int i, p;
	time_t t = time(0);

	for (i = 0; i < SCHED_NPROC - 1; i++) {
		p = sched_fork();
		if (p < 0) {
			fprintf(stderr, "fork #%d failed\n", i);
			exit(-1);
		} else if (!p) {
			sched_nice((i % 40) - 20);
			break;
		}
	}

	int x;
	for (x = 1; x < (1 << DELAY_FACTOR); x++)
		;

	if (current->pid == 1) {
		kill(getpid(), SIGABRT);
	} else {
		sched_exit(0);
	}

	struct sched_waitq wq1;
	sched_waitq_init(&wq1);
	sched_sleep(&wq1);	// Sleep Indefinitely
}
/*
 * timer_waitperiod - wait for the next period of the periodic timer.
 *
 * If the caller task receives any exception, this system call
 * will return before the target time. So, the caller must retry
 * immediately if the error status is EINTR. This will be
 * automatically done by the library stub routine.
 */
int
timer_waitperiod(void)
{
	struct timer *tmr;
	int rc;

	tmr = curthread->periodic;
	if (tmr == NULL || tmr->state != TM_ACTIVE)
		return EINVAL;

	if (time_before(lbolt, tmr->expire)) {
		/*
		 * Sleep until timer_handler() routine wakes us up.
		 */
		rc = sched_sleep(&tmr->event);
		if (rc != SLP_SUCCESS)
			return EINTR;
	}
	return 0;
}
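/*
 * Hedged usage sketch (assumed Prex-style API, not from the source
 * above): a thread programs its own periodic timer and then blocks in
 * timer_waitperiod() once per cycle, retrying on EINTR as the comment
 * above prescribes. timer_periodic()'s start/period arguments and
 * sample_once() are assumptions for illustration.
 */
static void
sampler_thread(void *arg)
{
	int error;

	/* Assumed: first expiry after 100 ms, then every 50 ms. */
	timer_periodic(thread_self(), 100, 50);

	for (;;) {
		do {
			error = timer_waitperiod();
		} while (error == EINTR);	/* an exception cut the sleep short */
		sample_once();			/* hypothetical periodic work */
	}
}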
error_t rwlock_wrlock(struct rwlock_s *rwlock)
{
	uint_t irq_state;

	mcs_lock(&rwlock->lock, &irq_state);
	//spinlock_lock(&rwlock->lock);

	/* count == 0: lock is free; a writer marks ownership by
	 * driving the count negative */
	if(rwlock->count == 0)
	{
		rwlock->count --;
		//spinlock_unlock(&rwlock->lock);
		mcs_unlock(&rwlock->lock, irq_state);
		return 0;
	}

	wait_on(&rwlock->wr_wait_queue, WAIT_LAST);
	//spinlock_unlock_nosched(&rwlock->lock);
	mcs_unlock(&rwlock->lock, irq_state);
	sched_sleep(current_thread);
	return 0;
}
void mpu_reset(AccelFS new_accel_fs, GyroFS new_gyro_fs, uint8_t new_dlpf, uint8_t new_samplerate_div)
{
	accel_fs = new_accel_fs;
	gyro_fs = new_gyro_fs;
	dlpf = new_dlpf;
	samplerate_div = new_samplerate_div;

	writereg(REG_PWR_MGMT_1, 1 << 7);		// reset MPU
	sched_sleep(100);
	writereg(REG_PWR_MGMT_1, 3);			// clock source is z gyro
	writereg(REG_PWR_MGMT_2, 0);
	writereg(REG_USER_CTRL, (1 << 4));		// disable I2C interface

	if (readreg(REG_WHO_AM_I) != 0x68)		// check WHO_AM_I register
		kernel_halt("MPU failed to read WHO_AM_I");

	writereg(REG_SMPLRT_DIV, samplerate_div);	// set sample rate divisor
	writereg(REG_CONFIG, dlpf);			// set DLPF
	writereg(REG_GYRO_CONFIG, (int)gyro_fs << 3);	// set gyro FS_SEL
	writereg(REG_ACCEL_CONFIG, (int)accel_fs << 3);	// set accel FS_SEL
	writereg(REG_INT_PIN_CFG, (1 << 5) | (1 << 4));	// turn on LATCH_INT and INT_RD_CLEAR
	writereg(REG_INT_ENABLE, (1 << 0));		// turn on DATA_RDY_EN
}
/*
 * Process a read call on a tty device.
 */
static int
tty_read(file_t file, char *buf, size_t *nbyte, int blkno)
{
	struct tty *tp = file->priv;
	unsigned char *cc;
	struct tty_queue *qp;
	int rc, c;
	unsigned char byte;
	size_t count = 0;
	tcflag_t lflag;

	lflag = tp->t_lflag;
	cc = tp->t_cc;
	qp = (lflag & ICANON) ? &tp->t_canq : &tp->t_rawq;

	if ((file->f_flags & O_NONBLOCK) && ttyq_empty(qp))
		return EAGAIN;

	/* If there is no input, wait for it */
	while (ttyq_empty(qp)) {
		rc = sched_sleep(&tp->t_input);
		if (rc == SLP_INTR)
			return EINTR;
	}
	while (count < *nbyte) {
		if ((c = ttyq_getc(qp)) == -1)
			break;
		if (c == cc[VEOF] && (lflag & ICANON))
			break;
		count++;
		byte = c;	/* for BIG_ENDIAN */
		if (umem_copyout(&byte, buf, 1))
			return EFAULT;
		if ((lflag & ICANON) && (c == '\n' || c == cc[VEOL]))
			break;
		buf++;
	}
	*nbyte = count;
	return 0;
}
error_t rwlock_rdlock(struct rwlock_s *rwlock)
{
	uint_t irq_state;

	//spinlock_lock(&rwlock->lock);
	mcs_lock(&rwlock->lock, &irq_state);

	/* priority is given to writers */
	if((rwlock->count >= 0) && (wait_queue_isEmpty(&rwlock->wr_wait_queue)))
	{
		rwlock->count ++;
		//spinlock_unlock(&rwlock->lock);
		mcs_unlock(&rwlock->lock, irq_state);
		return 0;
	}

	wait_on(&rwlock->rd_wait_queue, WAIT_LAST);
	//spinlock_unlock_nosched(&rwlock->lock);
	mcs_unlock(&rwlock->lock, irq_state);
	sched_sleep(current_thread);
	return 0;
}
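/*
 * Hedged companion sketch (not from the source above): a matching
 * rwlock_unlock() under the same conventions, where a negative count
 * marks a writer and waiting writers are preferred over readers; that
 * preference is what makes the wr_wait_queue check in rwlock_rdlock()
 * work. wakeup_one() and WAIT_FIRST are assumed wait-queue helpers,
 * counterparts of the wait_on()/WAIT_LAST calls above.
 */
error_t rwlock_unlock(struct rwlock_s *rwlock)
{
	uint_t irq_state;

	mcs_lock(&rwlock->lock, &irq_state);

	if(rwlock->count < 0)
		rwlock->count = 0;	/* the writer leaves */
	else
		rwlock->count --;	/* one reader leaves */

	if(rwlock->count == 0)
	{
		if(!(wait_queue_isEmpty(&rwlock->wr_wait_queue)))
		{
			rwlock->count --;	/* hand off to a single writer */
			wakeup_one(&rwlock->wr_wait_queue, WAIT_FIRST);
		}
		else
		{
			/* grant the lock to every waiting reader on its behalf,
			 * since blocked readers never incremented the count */
			while(!(wait_queue_isEmpty(&rwlock->rd_wait_queue)))
			{
				rwlock->count ++;
				wakeup_one(&rwlock->rd_wait_queue, WAIT_FIRST);
			}
		}
	}

	mcs_unlock(&rwlock->lock, irq_state);
	return 0;
}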
/*
 * Receive a message.
 *
 * A thread can receive a message from an object which was
 * created by any thread belonging to the same task. If the
 * message has not arrived yet, it blocks until a message comes in.
 *
 * The size argument specifies the "maximum" size of the message
 * buffer to receive. If the sent message is larger than this
 * size, the kernel will automatically clip the message to this
 * maximum buffer size.
 *
 * When a message is received, the sender thread is removed from
 * the object's send queue. So, another thread can receive the
 * subsequent message from that object. This is important for
 * a multi-threaded server which must receive multiple messages
 * simultaneously.
 */
int
msg_receive(object_t obj, void *msg, size_t size)
{
	thread_t t;
	size_t len;
	int rc, error = 0;

	if (!user_area(msg))
		return EFAULT;

	sched_lock();
	if (!object_valid(obj)) {
		sched_unlock();
		return EINVAL;
	}
	if (obj->owner != curtask) {
		sched_unlock();
		return EACCES;
	}
	/*
	 * Check if this thread finished the previous receive
	 * operation. A thread can not receive different
	 * messages at once.
	 */
	if (curthread->recvobj) {
		sched_unlock();
		return EBUSY;
	}
	curthread->recvobj = obj;

	/*
	 * If no message exists, wait until a message arrives.
	 */
	while (queue_empty(&obj->sendq)) {
		/*
		 * Block until someone sends a message.
		 */
		msg_enqueue(&obj->recvq, curthread);
		rc = sched_sleep(&ipc_event);
		if (rc != 0) {
			/*
			 * Receive failed for some reason.
			 */
			switch (rc) {
			case SLP_INVAL:
				error = EINVAL;	/* Object has been deleted */
				break;
			case SLP_INTR:
				queue_remove(&curthread->ipc_link);
				error = EINTR;	/* Got exception */
				break;
			default:
				panic("msg_receive");
				break;
			}
			curthread->recvobj = NULL;
			sched_unlock();
			return error;
		}
		/*
		 * Check the existence of the sender thread again.
		 * Even if this thread is woken by the sender thread,
		 * the message may be received by another thread.
		 * This may happen when another high priority thread
		 * becomes runnable before we receive the message.
		 */
	}

	t = msg_dequeue(&obj->sendq);

	/*
	 * Copy out the message to the user-space.
	 */
	len = MIN(size, t->msgsize);
	if (len > 0) {
		if (copyout(t->msgaddr, msg, len)) {
			msg_enqueue(&obj->sendq, t);
			curthread->recvobj = NULL;
			sched_unlock();
			return EFAULT;
		}
	}
	/*
	 * Detach the message from the target object.
	 */
	curthread->sender = t;
	t->receiver = curthread;

	sched_unlock();
	return error;
}
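/*
 * Hedged usage sketch (assumed Prex-style user API, not part of the
 * kernel source above): a single-threaded server loop pairing
 * msg_receive() with msg_reply(). The object name "!demo" and
 * struct demo_msg are made up for illustration.
 */
struct demo_msg {
	struct msg_header hdr;	/* kernel fills in hdr.task on the way in */
	int value;		/* hypothetical payload */
};

static void
demo_server(void)
{
	object_t obj;
	struct demo_msg m;

	if (object_create("!demo", &obj))
		return;

	for (;;) {
		/* Block until a client calls msg_send() on "!demo". */
		if (msg_receive(obj, &m, sizeof(m)))
			continue;
		m.value = 0;	/* hypothetical: report success */
		msg_reply(obj, &m, sizeof(m));
	}
}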
/* TODO: reintroduce barrier's ops to deal with case-specific treatment */
error_t barrier_wait(struct barrier_s *barrier)
{
	register uint_t ticket;
	register uint_t index;
	register uint_t wqdbsz;
	register wqdb_t *wqdb;
	register bool_t isShared;
	struct thread_s *this;
	uint_t tm_now;

	tm_now   = cpu_time_stamp();
	this     = current_thread;
	index    = this->info.order;
	ticket   = 0;
	isShared = (barrier->owner == NULL) ? true : false;

	if((barrier->signature != BARRIER_ID) ||
	   ((isShared == false) && (barrier->owner != this->task)))
		return EINVAL;

	wqdbsz = PMM_PAGE_SIZE / sizeof(wqdb_record_t);

	if(isShared)
	{
		spinlock_lock(&barrier->lock);
		index  = barrier->index ++;
		ticket = barrier->count - index;
	}

	wqdb = barrier->wqdb_tbl[index / wqdbsz];

#if CONFIG_USE_SCHED_LOCKS
	wqdb->tbl[index % wqdbsz].listner = (void*)this;
#else
	uint_t irq_state;
	cpu_disable_all_irq(&irq_state); /* To prevent against any scheduler intervention */
	wqdb->tbl[index % wqdbsz].event   = sched_event_make(this, SCHED_OP_WAKEUP);
	wqdb->tbl[index % wqdbsz].listner = sched_get_listner(this, SCHED_OP_WAKEUP);
#endif

	if(isShared == false)
		ticket = atomic_add(&barrier->waiting, -1);

	if(ticket == 1)
	{
#if !(CONFIG_USE_SCHED_LOCKS)
		cpu_restore_irq(irq_state);
#endif
		barrier->tm_last = tm_now;
		wqdb->tbl[index % wqdbsz].listner = NULL;

		if(isShared)
		{
			barrier->index = 0;
			spinlock_unlock(&barrier->lock);
		}
		else
			atomic_init(&barrier->waiting, barrier->count);

		barrier_do_broadcast(barrier);
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}

	if(ticket == barrier->count)
		barrier->tm_first = tm_now;

	if(isShared)	/* the lock is only ever taken in the shared case */
		spinlock_unlock_nosched(&barrier->lock);

	sched_sleep(this);

#if !(CONFIG_USE_SCHED_LOCKS)
	cpu_restore_irq(irq_state);
#endif

	return 0;
}
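/*
 * Hedged companion sketch (not from the source above): the shape of
 * barrier_do_broadcast() implied by barrier_wait(), walking every
 * wait-queue-database record and firing the wakeup event each waiter
 * registered before calling sched_sleep(). sched_event_send() is an
 * assumed counterpart of sched_event_make()/sched_get_listner().
 */
static void barrier_do_broadcast(struct barrier_s *barrier)
{
	register uint_t index;
	register uint_t wqdbsz;
	register wqdb_t *wqdb;

	wqdbsz = PMM_PAGE_SIZE / sizeof(wqdb_record_t);

	for(index = 0; index < barrier->count; index++)
	{
		wqdb = barrier->wqdb_tbl[index / wqdbsz];

		if(wqdb->tbl[index % wqdbsz].listner == NULL)
			continue;	/* the serial waiter cleared its own slot */

#if CONFIG_USE_SCHED_LOCKS
		sched_wakeup((struct thread_s *) wqdb->tbl[index % wqdbsz].listner);
#else
		sched_event_send(wqdb->tbl[index % wqdbsz].listner,
				 wqdb->tbl[index % wqdbsz].event);
#endif
		wqdb->tbl[index % wqdbsz].listner = NULL;
	}
}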
void* kvfsd(void *arg)
{
	uint_t tm_now, cntr;
	struct task_s *task;
	struct thread_s *this;
	struct cpu_s *cpu;
	struct alarm_info_s info;
	struct event_s event;
	uint_t fs_type;
	error_t err;

	cpu_enable_all_irq(NULL);

	printk(INFO, "INFO: Starting KVFSD on CPU %d [ %d ]\n", cpu_get_id(), cpu_time_stamp());

	task    = current_task;
	fs_type = VFS_TYPES_NR;

#if CONFIG_ROOTFS_IS_EXT2
	fs_type = VFS_EXT2_TYPE;
#endif

#if CONFIG_ROOTFS_IS_VFAT
#if CONFIG_ROOTFS_IS_EXT2
#error More than one root fs has been selected
#endif
	fs_type = VFS_VFAT_TYPE;
#endif	/* CONFIG_ROOTFS_IS_VFAT */

	err = vfs_init(__sys_blk,
		       fs_type,
		       VFS_MAX_NODE_NUMBER,
		       VFS_MAX_FILE_NUMBER,
		       &task->vfs_root);

	task->vfs_cwd = task->vfs_root;

	printk(INFO, "INFO: Virtual File System (VFS) Is Ready\n");

	sysconf_init();

	if(err == 0)
	{
		if((err = task_load_init(task)))
		{
			printk(WARNING, "WARNING: failed to load user process, err %d [%u]\n",
			       err, cpu_time_stamp());
		}
	}

#if CONFIG_DEV_VERSION
	if(err != 0)
	{
		struct thread_s *thread;

		printk(INFO, "INFO: Creating kernel level terminal\n");

		thread = kthread_create(task,
					&kMiniShelld,
					NULL,
					current_cluster->id,
					current_cpu->lid);

		thread->task = task;
		list_add_last(&task->th_root, &thread->rope);
		err = sched_register(thread);
		assert(err == 0);
		sched_add_created(thread);
	}
#endif

	this = current_thread;
	cpu  = current_cpu;

	event_set_senderId(&event, this);
	event_set_priority(&event, E_FUNC);
	event_set_handler(&event, &kvfsd_alarm_event_handler);

	info.event = &event;
	cntr       = 0;

	while(1)
	{
		alarm_wait(&info, 10);
		sched_sleep(this);
		tm_now = cpu_time_stamp();
		printk(INFO, "INFO: System Current TimeStamp %u\n", tm_now);

		sync_all_pages();

		if((cntr % 4) == 0)
			dqdt_print_summary(dqdt_root);

		cntr ++;
	}

	return NULL;
}
int sched::run()
{
	myLogger.lw(INFO,"RUN: Starting SCHED Run...");

	message msg;

	// Starting conditions:
	int temp = getGV(playFname);
	if( temp <= 0 )
	{
		play = false;
		if( setGV(playFname, (int) play) == -1 )
		{
			myLogger.lw(ERROR,"RUN: Error setting current event counter to global var at %s",playFname);
		}
	}
	else
	{
		play = (bool) temp;
	}

	currEvent = getGV(currEventFname);
	if( (currEvent < 0) || (currEvent >= numEvents) )
	{
		currEvent = -1;
		if( setGV(currEventFname, currEvent) == -1 )
		{
			myLogger.lw(ERROR,"RUN: Error setting current event to global var at %s",currEventFname);
		}
	}
	else
		eventCounter = getGV(eventCounterFname);

	if( eventCounter < 0 )
	{
		eventCounter = 0;
		if( setGV(eventCounterFname, eventCounter) == -1 )
		{
			myLogger.lw(ERROR,"RUN: Error setting current event counter to global var at %s",eventCounterFname);
		}
	}

	timeSlept = getGV(timeSleptFname);
	if( timeSlept < 0 )
	{
		timeSlept = 0;
		if( setGV(timeSleptFname, timeSlept) == -1 )
		{
			myLogger.lw(ERROR,"RUN: Error setting current event counter to global var at %s",timeSleptFname);
		}
	}

	myLogger.lw(INFO,"RUN: Restored current event as %d, count %d, timeslept %d, play %d",currEvent,eventCounter,timeSlept,(int) play);

	// Update semaphores:
	setSemaphore(sched_semid,1,(int) play);
	setSemaphore(sched_semid,2,currEvent);
	setSemaphore(sched_semid,3,eventCounter);
	setSemaphore(sched_semid,4,timeSlept);

	int presleep = 0;
	if( currEvent > 0 )
	{
		presleep = event[currEvent].sleep;
	}

	// Sleep initially and wait for play, or advance when the next event is scheduled.
	if( sched_sleep(presleep) == -1 )
	{
		myLogger.lw(INFO,"RUN: Schedule terminated. Dying gracefully.");
		return 0;
	}
	currEvent++;

	for(; currEvent < numEvents; currEvent++)
	{
		// Update semaphore & file:
		setGV(currEventFname, currEvent);
		setSemaphore(sched_semid,2,currEvent);

		// Form message
		msg.cmd = event[currEvent].cmd;
		myLogger.lw(INFO,"RUN: Starting event %d. Run %d time(s): %s",event[currEvent].num,event[currEvent].count,msg.toString());

		eventCounter = 0;
		while( (eventCounter < event[currEvent].count) || (event[currEvent].count == -1) )
		{
			// Update semaphore & file:
			setGV(eventCounterFname, eventCounter);
			setSemaphore(sched_semid,3,eventCounter);
			myLogger.lw(INFO,"RUN: Running event %d. Count %d of %d.",event[currEvent].num,eventCounter + 1,event[currEvent].count);

			// Run command up to 5 times if failures occur:
			for( int numTrys = 0; numTrys < 5; numTrys++ )
			{
				commandWrapper.execute(&msg);
				if( msg.rsp.ret != 1 )
				{
					myLogger.lw(ERROR,"RUN: Event %d returned with error 0x%x on try %d. Sleeping 1 second and trying again.",event[currEvent].num, msg.rsp.ret, numTrys);
					usleep(1000000);
				}
				else
				{
					myLogger.lw(INFO,"RUN: Event %d successful on try %d.",event[currEvent].num, numTrys);
					break; // command success, go to sleepy time.
				}
			}

			if( sched_sleep(event[currEvent].sleep) == -1 )
			{
				myLogger.lw(INFO,"RUN: Schedule terminated. Dying gracefully.");
				return 0;
			}

			// Increment to next run of this event:
			eventCounter++;
		}
	}

	myLogger.lw(INFO,"RUN: Schedule completed. Dying gracefully.");
	return 0;
}
int schedule_main(void)
/****************************************************************/
{
	GHP_SCHED_T *pSched;
	SEND_DATA *pData;
	int nClientFd = -1;
	socklen_t nClientLength = 0;	/* accept() expects a socklen_t, not an int */
	struct sockaddr_in client_addr;

	signal(SIGPIPE, SIG_IGN);	// Ignore broken_pipe signal.

	pSched = new_sched();
	if (pSched == NULL)
	{
		if ( g_dbgShow )
		{
			fprintf( stdout, "Can't create SCHED Memory space.\n" );
			fflush(stdout);
		}
		//exit(1);
		system("killall duksan");
	}

	if ( shced_open(pSched) < 0 )
	{
		exit(0);
	}

	while (1)
	{
		sched_sleep( 0, 500 );

		// ready to accept socket
		nClientLength = sizeof(client_addr);
		nClientFd = accept(pSched->nFd, (struct sockaddr *)&client_addr, &nClientLength);
		if ( g_dbgShow )
		{
			fprintf( stdout, "[SCHED] Socket Accept OK.\n");
			fflush( stdout );
		}

		// check accept error
		if ( nClientFd == -1 )
		{
			if ( g_dbgShow )
			{
				fprintf( stdout, "+ [SCHED] Accept Error.\n");
				fflush( stdout );
			}
			// kill application
			sched_sleep( 3, 0 );
			system("killall duksan");
		}

		for (;;)
		{
			// receive message
			sched_sleep(0, 100);
			pSched->nRecvByte = sched_recv_message(nClientFd, pSched->bRxBuf);

			// parsing message
			if( pSched->nRecvByte == 0 )
			{
				if ( g_dbgShow )
				{
					fprintf( stdout, "+ [SCHED] Socket Close.\n");
					fflush( stdout );
				}

				// close socket
				close(nClientFd);

				// init variable
				memset( pSched->bTxBuf, 0x00, MAX_BUF_LENGTH );
				memset( pSched->bRxBuf, 0x00, MAX_BUF_LENGTH );
				memset( pSched->bPrevBuf, 0x00, MAX_BUF_LENGTH );
				pSched->nRecvByte = 0;
				pSched->nTempWp = 0;
				pSched->nIndexPrev = 0;

				// wait delay
				sched_sleep(3, 0);
				break;
			}
			else if ( pSched->nRecvByte > 0 )
			{
				if ( g_dbgShow )
				{
					fprintf( stdout, "+ [SCHED] Receive Byte %d\n", pSched->nRecvByte);
					fflush( stdout );
				}

				if ( pSched->nIndexPrev + pSched->nRecvByte > MAX_BUF_LENGTH )
				{
					;	/* would overflow the reassembly buffer: drop the fragment */
				}
				else
				{
					// copy message
					memcpy(&pSched->bPrevBuf[pSched->nIndexPrev], pSched->bRxBuf, pSched->nRecvByte);
					pSched->nIndexPrev = pSched->nIndexPrev + pSched->nRecvByte;
					if ( g_dbgShow )
					{
						fprintf( stdout, "+ [SCHED] Copy Byte %d\n", pSched->nIndexPrev);
						fflush( stdout );
					}

					// check message length
					pData = (SEND_DATA *)pSched->bPrevBuf;
					if( pSched->nIndexPrev == pData->length && Check_Message(pData) )
					{
						Parsing_Data(pData);
						SetDate(pData);
					}
				}
			}
		}
	}
}
/*
 * Send a message.
 *
 * The current thread will be blocked until any other thread
 * receives and replies to the message. A thread can send a
 * message to any object if it knows the object id.
 */
int
msg_send(object_t obj, void *msg, size_t size)
{
	struct msg_header *hdr;
	thread_t t;
	void *kmsg;
	int rc;

	if (!user_area(msg))
		return EFAULT;
	if (size < sizeof(struct msg_header))
		return EINVAL;

	sched_lock();

	if (!object_valid(obj)) {
		sched_unlock();
		return EINVAL;
	}
	/*
	 * A thread can not send a message when it is
	 * already receiving from the target object.
	 * It will obviously cause a deadlock.
	 */
	if (obj == curthread->recvobj) {
		sched_unlock();
		return EDEADLK;
	}
	/*
	 * Translate the message address to a kernel linear
	 * address, so that a receiver thread can access
	 * the message via a kernel pointer. We can catch
	 * the page fault here.
	 */
	if ((kmsg = kmem_map(msg, size)) == NULL) {
		sched_unlock();
		return EFAULT;
	}
	curthread->msgaddr = kmsg;
	curthread->msgsize = size;

	/*
	 * The sender ID is filled in the message header
	 * by the kernel. So, the receiver can trust it.
	 */
	hdr = (struct msg_header *)kmsg;
	hdr->task = curtask;

	/*
	 * If a receiver already exists, wake it up.
	 * The highest priority thread can get the message.
	 */
	if (!queue_empty(&obj->recvq)) {
		t = msg_dequeue(&obj->recvq);
		sched_unsleep(t, 0);
	}
	/*
	 * Sleep until we get a reply message.
	 * Note: Do not touch any data in the object
	 * structure after we wake up. This is because the
	 * target object may be deleted while we are sleeping.
	 */
	curthread->sendobj = obj;
	msg_enqueue(&obj->sendq, curthread);
	rc = sched_sleep(&ipc_event);
	if (rc == SLP_INTR)
		queue_remove(&curthread->ipc_link);
	curthread->sendobj = NULL;

	sched_unlock();

	/*
	 * Check the sleep result.
	 */
	switch (rc) {
	case SLP_BREAK:
		return EAGAIN;	/* Receiver has been terminated */
	case SLP_INVAL:
		return EINVAL;	/* Object has been deleted */
	case SLP_INTR:
		return EINTR;	/* Exception */
	default:
		/* DO NOTHING */
		break;
	}
	return 0;
}
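/*
 * Hedged usage sketch (assumed Prex-style user API, not part of the
 * kernel source above): the client side matching the demo_server()
 * sketch earlier, looking up the object by name and blocking in
 * msg_send() until the server replies. "!demo" and struct demo_msg
 * are the same illustrative assumptions as before.
 */
static int
demo_call(int *result)
{
	object_t obj;
	struct demo_msg m;
	int error;

	if ((error = object_lookup("!demo", &obj)) != 0)
		return error;

	/* Blocks in sched_sleep() until demo_server() calls msg_reply(). */
	error = msg_send(obj, &m, sizeof(m));
	if (error == 0)
		*result = m.value;
	return error;
}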