/*! \details This function initializes \a mutex with \a attr.
 * \return Zero on success or -1 with \a errno (see \ref errno) set to:
 * - EINVAL: mutex is NULL
 */
int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr){
	if ( mutex == NULL ){
		errno = EINVAL;
		return -1;
	}

	//Mark the mutex as initialized before applying any attributes
	mutex->flags = PTHREAD_MUTEX_FLAGS_INITIALIZED;

	if ( attr == NULL ){
		//No attributes supplied -- apply the defaults
		mutex->prio_ceiling = PTHREAD_MUTEX_PRIO_CEILING;
		//Task zero owns no process; everyone else records their PID
		mutex->pid = (task_get_current() == 0) ? 0 : task_get_pid( task_get_current() );
		mutex->lock = 0;
		mutex->pthread = -1;
		return 0;
	}

	if ( attr->process_shared != 0 ){
		//Enter priv mode to modify a shared object
		mutex->flags |= (PTHREAD_MUTEX_FLAGS_PSHARED);
	}

	if ( attr->recursive ){
		mutex->flags |= (PTHREAD_MUTEX_FLAGS_RECURSIVE);
	}

	//Copy the attribute-driven settings and reset the lock state
	mutex->prio_ceiling = attr->prio_ceiling;
	mutex->pid = task_get_pid( task_get_current() );
	mutex->lock = 0;
	mutex->pthread = -1;
	return 0;
}
static int wait_until_stop_sent(int port) { timestamp_t deadline; timestamp_t slow_cutoff; uint8_t is_slow; deadline = slow_cutoff = get_time(); deadline.val += TIMEOUT_STOP_SENT_US; slow_cutoff.val += SLOW_STOP_SENT_US; while (STM32_I2C_CR1(port) & (1 << 9)) { if (timestamp_expired(deadline, NULL)) { ccprintf("Stop event deadline passed:\ttask=%d" "\tCR1=%016b\n", (int)task_get_current(), STM32_I2C_CR1(port)); return EC_ERROR_TIMEOUT; } if (is_slow) { /* If we haven't gotten a fast response, sleep */ usleep(STOP_SENT_RETRY_US); } else { /* Check to see if this request is taking a while */ if (timestamp_expired(slow_cutoff, NULL)) { ccprintf("Stop event taking a while: task=%d", (int)task_get_current()); is_slow = 1; } } } return EC_SUCCESS; }
/*
 * Wait until at least one event in event_mask (or the timeout) fires.
 * Events outside the mask that arrive while waiting are re-posted to the
 * task so later waiters still see them; only masked events are returned.
 */
uint32_t task_wait_event_mask(uint32_t event_mask, int timeout_us)
{
	uint64_t wake_deadline = get_time().val + timeout_us;
	uint32_t collected = 0;
	int remaining_us = timeout_us;

	/* A timeout is always reported through TASK_EVENT_TIMER */
	event_mask |= TASK_EVENT_TIMER;

	do {
		/* Accumulate everything that arrives, masked or not */
		collected |= task_wait_event(remaining_us);
		remaining_us = wake_deadline - get_time().val;
		if (timeout_us > 0 && remaining_us <= 0) {
			/* Guarantee a TIMER event on the timeout path */
			collected |= TASK_EVENT_TIMER;
			break;
		}
	} while (!(collected & event_mask));

	/* Hand unconsumed events back to the task */
	if (collected & ~event_mask)
		tasks[task_get_current()].event |= collected & ~event_mask;

	return collected & event_mask;
}
/*
 * Per-port USB charger detection task: initializes the PI3USB9281 detector,
 * then services BC1.2 and VBUS interrupt events forever.
 */
void usb_charger_task(void)
{
	/* Map this task instance onto its charger port */
	const int port = (task_get_current() == TASK_ID_USB_CHG_P0 ? 0 : 1);

	/* Bring up the detector chip and run an initial detection pass */
	pi3usb9281_init(port);
	usb_charger_bc12_detect(port);

	for (;;) {
		/* Sleep until an interrupt event arrives */
		const uint32_t events = task_wait_event(-1);

		/* Detector interrupt: classify the attached charger */
		if (events & USB_CHG_EVENT_BC12)
			usb_charger_bc12_detect(port);

		/*
		 * Re-enable interrupts on the Pericom charger detector since
		 * the chip may periodically reset itself, and come back up
		 * with registers in default state.
		 * TODO(crosbug.com/p/33823): Fix these unwanted resets.
		 */
		if (events & USB_CHG_EVENT_VBUS) {
			pi3usb9281_enable_interrupts(port);
#ifndef CONFIG_USB_PD_TCPM_VBUS
			CPRINTS("VBUS p%d %d", port,
				pd_snk_is_vbus_provided(port));
#endif
		}
	}
}
//! Root-mode helper that starts a device read or write described by \a args
//! (a priv_device_data_transfer_t) and, when the driver reports an async
//! start (ret == 0), blocks the calling task until completion.
//! The driver's return value is left in p->ret for the caller.
void priv_device_data_transfer(void * args){
	priv_device_data_transfer_t * p = (priv_device_data_transfer_t*)args;
	_hwpl_core_priv_disable_interrupts(NULL); //no switching until the transfer is started
	if ( p->read != 0 ){
		//Read operation
		p->ret = p->fs->priv_read(p->fs->cfg, p->handle, &p->op);
	} else {
		//Write operation
		p->ret = p->fs->priv_write(p->fs->cfg, p->handle, &p->op);
	}
	if ( p->ret == 0 ){
		if ( (p->op.nbyte >= 0) //wait for the operation to complete or for data to arrive
		){
#if SINGLE_TASK == 0
			//Block waiting for the operation to complete or new data to be ready
			//NOTE(review): handle + read uses GNU void*-arithmetic; it appears to
			//form a distinct block-object key per transfer direction -- confirm intent
			sched_table[ task_get_current() ].block_object = (void*)p->handle + p->read;
			//switch tasks until a signal becomes available
			sched_priv_update_on_sleep();
#else
			//Single-task build: the caller polls this flag instead of sleeping
			waiting = true;
#endif
		}
	}
	_hwpl_core_priv_enable_interrupts(NULL); //re-enable; a context switch may now occur
}
/*
 * Return the process ID of the calling task. Single-task builds have
 * exactly one process, reported as PID 0.
 */
pid_t _getpid(void){
#if SINGLE_TASK == 0
	//Translate the active task index into its owning process ID
	int current = task_get_current();
	return (pid_t)task_get_pid( current );
#else
	return (pid_t)0;
#endif
}
/*! \details This function causes the calling thread to sleep for \a useconds microseconds. * * \return 0 or -1 for an error with errno (see \ref ERRNO) set to: * - EINVAL: \a useconds is greater than 1 million. */ int usleep(useconds_t useconds){ uint32_t clocks; uint32_t tmp; int i; if ( useconds == 0 ){ return 0; } if ( useconds <= 1000000UL ){ clocks = sched_useconds_to_clocks(useconds); tmp = sched_useconds_to_clocks(1); if( (task_get_current() == 0) || (clocks < 8000) ){ for(i = 0; i < clocks; i+=14){ asm volatile("nop"); asm volatile("nop"); asm volatile("nop"); asm volatile("nop"); asm volatile("nop"); asm volatile("nop"); asm volatile("nop"); asm volatile("nop"); asm volatile("nop"); asm volatile("nop"); } } else {
/*
 * Enable the transfer-complete interrupt on a DMA channel and record the
 * calling task so the ISR knows whom to wake when the transfer finishes.
 */
void dma_enable_tc_interrupt(enum dma_channel channel)
{
	stm32_dma_chan_t *dma_chan = dma_get_channel(channel);

	/* Remember the caller for the ISR's wake-up */
	id[channel] = task_get_current();

	/* Unmask transfer-complete on the channel, then at the NVIC */
	dma_chan->ccr |= STM32_DMA_CCR_TCIE;
	task_enable_irq(dma_get_irq(channel));
}
/**
 * Flush an ADC sequencer and initiate a read.
 *
 * @param seq Sequencer to read
 * @return Raw ADC value.
 */
static int flush_and_read(enum lm4_adc_sequencer seq)
{
	/*
	 * This is currently simple because we can dedicate a sequencer to each
	 * ADC channel. If we have enough channels that's no longer possible,
	 * this code will need to become more complex. For example, we could:
	 *
	 * 1) Read them all using a timer interrupt, and then return the most
	 * recent value? This is lowest-latency for the caller, but won't
	 * return accurate data if read frequently.
	 *
	 * 2) Reserve SS3 for reading a single value, and configure it on each
	 * read? Needs mutex if we could have multiple callers; doesn't matter
	 * if just used for debugging.
	 *
	 * 3) Both?
	 */
	volatile uint32_t scratch __attribute__((unused));
	int event;

	/* Empty the FIFO of any previous results (bit 8 = FIFO empty) */
	while (!(LM4_ADC_SSFSTAT(seq) & 0x100))
		scratch = LM4_ADC_SSFIFO(seq);

	/*
	 * This assumes we don't have multiple tasks accessing the same
	 * sequencer. Add mutex lock if needed.
	 */
	task_waiting_on_ss[seq] = task_get_current();

	/* Clear the interrupt status */
	LM4_ADC_ADCISC |= 0x01 << seq;

	/* Enable interrupt */
	LM4_ADC_ADCIM |= 0x01 << seq;

	/* Initiate sample sequence */
	LM4_ADC_ADCPSSI |= 0x01 << seq;

	/* Wait for interrupt */
	event = task_wait_event_mask(TASK_EVENT_ADC_DONE, ADC_TIMEOUT_US);

	/* Disable interrupt */
	LM4_ADC_ADCIM &= ~(0x01 << seq);

	task_waiting_on_ss[seq] = TASK_ID_INVALID;

	if (!(event & TASK_EVENT_ADC_DONE))
		return ADC_READ_ERROR;

	/* Read the raw conversion result from the FIFO */
	return LM4_ADC_SSFIFO(seq);
}
static int wait_for_interrupt(int port, int *event) { task_waiting_on_port[port] = task_get_current(); task_enable_irq(MEC1322_IRQ_I2C_0 + port); /* * We want to wait here quietly until the I2C interrupt comes * along, but we don't want to lose any pending events that * will be needed by the task that started the I2C transaction * in the first place. So we save them up and restore them when * the I2C is either completed or timed out. Refer to the * implementation of usleep() for a similar situation. */ *event |= (task_wait_event(SECOND) & ~TASK_EVENT_I2C_IDLE); task_waiting_on_port[port] = TASK_ID_INVALID; if (*event & TASK_EVENT_TIMER) { /* Restore any events that we saw while waiting */ task_set_event(task_get_current(), (*event & ~TASK_EVENT_TIMER), 0); return EC_ERROR_TIMEOUT; } return EC_SUCCESS; }
/*
 * Create a new process task running \a p with \a path_arg as its argument.
 *
 * BUG FIX: task.parent was first assigned task_get_current() and then
 * immediately overwritten with parent_id a couple of statements later; the
 * dead store is removed and only the caller-supplied parent is kept.
 *
 * @param p          process entry point
 * @param cleanup    routine invoked when the process stops
 * @param path_arg   argument string passed to the entry point (in r0)
 * @param mem        task memory description (data region tops the stack)
 * @param reent_ptr  newlib reentrancy structure for the process
 * @param parent_id  task ID recorded as the new process's parent
 * @return the new task ID, or -1 with errno set (EIO: misaligned stack),
 *         or a negative error from task_mpu_calc_protection()
 */
int task_create_process(void (*p)(char*),
		void (*cleanup)(void*),
		const char * path_arg,
		task_memories_t * mem,
		void * reent_ptr,
		int parent_id){
	int tid;
	int err;
	void * stackaddr;
	new_task_t task;
	task_memories_t task_memories;
	static int task_process_counter = 1;

	//The stack starts at the top of the data region and grows down
	stackaddr = mem->data.addr + mem->data.size;

	//Check the stack alignment
	if ( (unsigned int)stackaddr & 0x03 ){
		errno = EIO;
		return -1;
	}

	//Initialize the task
	task.stackaddr = stackaddr;
	task.start = (uint32_t)p;
	task.stop = (uint32_t)cleanup;
	task.r0 = (uint32_t)path_arg;
	task.r1 = (uint32_t)0;
	task.pid = task_process_counter++; //Assign a new pid
	task.reent = (struct _reent*)reent_ptr;
	task.global_reent = task.reent;
	task.flags = TASK_FLAGS_USED;
	task.priority = 0;
	task.parent = parent_id; //only the caller-supplied parent is meaningful

	//Compute the MPU protection layout on a private copy of the memories
	memcpy(&task_memories, mem, sizeof(task_memories_t));
	if ( (err = task_mpu_calc_protection(&task_memories)) < 0 ){
		return err;
	}
	task.mem = &task_memories;

	//Do a priv call while accessing the task table so there are no interruptions
	cortexm_svcall( (cortexm_svcall_t)task_root_new_task, &task);
	tid = task.tid;
	return tid;
}
//! Block the current task on \a block_object until \a abs_time (absolute
//! scheduler time). If abs_time falls within the current usecond-counter
//! epoch, the sleep output-compare channel is (re)programmed; a wake in the
//! past is detected and skipped so the task does not sleep forever.
void sched_priv_timedblock(void * block_object, struct sched_timeval * abs_time){
#if SINGLE_TASK == 0
	int id;
	tmr_reqattr_t chan_req;
	uint32_t now;
	bool time_sleep;

	//Initialization
	id = task_get_current();
	sched_table[id].block_object = block_object;
	time_sleep = false;

	//Only arm a wake-up if the deadline is not already in a past epoch
	if (abs_time->tv_sec >= sched_usecond_counter){
		sched_table[id].wake.tv_sec = abs_time->tv_sec;
		sched_table[id].wake.tv_usec = abs_time->tv_usec;
		if(abs_time->tv_sec == sched_usecond_counter){
			//Deadline is in the current epoch: program the OC directly
			hwpl_tmr_off(clk_usecond_tmr, NULL); //stop the timer
			//Read the current OC value to see if it needs to be updated
			chan_req.channel = CAOSLIB_USECOND_TMR_SLEEP_OC;
			hwpl_tmr_getoc(clk_usecond_tmr, &chan_req);
			if ( abs_time->tv_usec < chan_req.value ){
				//This deadline is sooner than the pending one
				chan_req.value = abs_time->tv_usec;
			}
			//See if abs_time is in the past
			now = (uint32_t)hwpl_tmr_get(clk_usecond_tmr, NULL);
			if( abs_time->tv_usec > (now+40) ){ //needs to be enough in the future to allow the OC to be set before the timer passes it
				hwpl_tmr_setoc(clk_usecond_tmr, &chan_req);
				time_sleep = true;
			}
			hwpl_tmr_on(clk_usecond_tmr, NULL); //start the timer
		} else {
			//Deadline is in a later epoch; the epoch rollover wakes us
			time_sleep = true;
		}
	}

	if ( (block_object == NULL) && (time_sleep == false) ){
		//Do not sleep -- nothing to block on and no timed wake armed
		return;
	}
	//Yield the CPU until unblocked or the wake time arrives
	sched_priv_update_on_sleep();
#endif
}
/**
 * Emulator implementation of task_wait_event(): park the calling pthread
 * until the scheduler resumes it, then return and clear its pending events.
 *
 * @param timeout_us  If positive, records this task's wake time so the
 *                    scheduler can deliver TASK_EVENT_TIMER; otherwise wait
 *                    indefinitely.
 * @return the events that were pending when the task resumed.
 */
uint32_t task_wait_event(int timeout_us)
{
	int tid = task_get_current();
	int ret;
	pthread_mutex_lock(&interrupt_lock);
	if (timeout_us > 0)
		tasks[tid].wake_time.val = get_time().val + timeout_us;

	/* Transfer control to scheduler */
	pthread_cond_signal(&scheduler_cond);
	/*
	 * NOTE(review): the wait releases run_lock, not interrupt_lock, so
	 * interrupt_lock stays held across the sleep -- presumably deliberate
	 * in this emulator's two-lock scheme; confirm against the scheduler
	 * thread's locking order.
	 */
	pthread_cond_wait(&tasks[tid].resume, &run_lock);

	/* Resume: consume and clear this task's pending events */
	ret = tasks[tid].event;
	tasks[tid].event = 0;
	pthread_mutex_unlock(&interrupt_lock);
	return ret;
}
/*
 * Wait for the current I2C byte transfer on this port to finish (PIN bit
 * clears). Returns the LRB status bit on success, or the error from
 * wait_for_interrupt() on timeout.
 */
static int wait_byte_done(int port)
{
	int saved_events = 0;
	uint8_t status;

	/* Sleep on the I2C interrupt for as long as PIN stays asserted */
	for (status = MEC1322_I2C_STATUS(port); status & STS_PIN;
	     status = MEC1322_I2C_STATUS(port)) {
		int err = wait_for_interrupt(port, &saved_events);

		if (err)
			return err;
	}

	/*
	 * Restore any events that we saw while waiting. TASK_EVENT_TIMER
	 * isn't one, because wait_for_interrupt() already consumed it on
	 * the timeout path.
	 */
	task_set_event(task_get_current(), saved_events, 0);

	return status & STS_LRB;
}
/*
 * Acquire the mutex, marking this task as a waiter for the duration and
 * sleeping on contention until the lock is observed free.
 */
void mutex_lock(struct mutex *mtx)
{
	const int waiter_bit = 1 << task_get_current();
	int acquired = 0;

	mtx->waiters |= waiter_bit;

	while (!acquired) {
		if (mtx->lock == 0) {
			mtx->lock = 1;
			acquired = 1;
		} else {
			/* Contention on the mutex */
			/* TODO(crbug.com/435612, crbug.com/435611)
			 * This discards any pending events! */
			task_wait_event(-1);
		}
	}

	mtx->waiters &= ~waiter_bit;
}
void task_abc(void *data) { int task_id = task_get_current(); int id = task_id - TASK_ID_A; task_id_t next = task_id + 1; if (next > TASK_ID_C) next = TASK_ID_A; task_wait_event(-1); CPRINTS("%c Starting", 'A' + id); cflush(); while (1) { wake_count[id]++; if (id == 2 && wake_count[id] == repeat_count) task_set_event(TASK_ID_CTS, TASK_EVENT_WAKE, 1); else task_set_event(next, TASK_EVENT_WAKE, 1); } }
/*
 * Wait for the I2C controller on this port to go idle (NBB asserted).
 * Returns EC_SUCCESS, EC_ERROR_UNKNOWN on bus error / lost arbitration,
 * or the error from wait_for_interrupt() on timeout.
 */
static int wait_idle(int port)
{
	int saved_events = 0;
	uint8_t status = MEC1322_I2C_STATUS(port);

	/* Block on the I2C interrupt until the Not-Busy bit asserts */
	while (!(status & STS_NBB)) {
		int err = wait_for_interrupt(port, &saved_events);

		if (err)
			return err;
		status = MEC1322_I2C_STATUS(port);
	}

	/*
	 * Restore any events that we saw while waiting. TASK_EVENT_TIMER
	 * isn't one, because wait_for_interrupt() already consumed it on
	 * the timeout path.
	 */
	task_set_event(task_get_current(), saved_events, 0);

	/* Bus error or lost arbitration ends the transaction */
	if (status & (STS_BER | STS_LAB))
		return EC_ERROR_UNKNOWN;

	return EC_SUCCESS;
}
/**
 * Run one I2C master transfer on \a port: claim the controller, program the
 * transfer buffers, recover the bus if it is wedged, then execute the
 * transaction and release the controller.
 *
 * @param port        I2C port (mapped to a controller)
 * @param slave_addr  target address (7- or 8-bit per I2C_7BITS_ADDR)
 * @param out         bytes to write; out_size may be 0
 * @param in          buffer for bytes to read; in_size may be 0
 * @param flags       I2C_XFER_* flags controlling start/stop generation
 * @return EC_SUCCESS, EC_ERROR_BUSY if another task holds the controller,
 *         or EC_ERROR_UNKNOWN on any controller-reported error
 */
int chip_i2c_xfer(int port, int slave_addr, const uint8_t *out, int out_size,
		uint8_t *in, int in_size, int flags)
{
	int ctrl = i2c_port_to_controller(port);
	volatile struct i2c_status *p_status = i2c_stsobjs + ctrl;

	/* Nothing to transfer */
	if (out_size == 0 && in_size == 0)
		return EC_SUCCESS;

	/* Claim check must be atomic w.r.t. other tasks */
	interrupt_disable();
	/* make sure bus is not occupied by the other task */
	if (p_status->task_waiting != TASK_ID_INVALID) {
		interrupt_enable();
		return EC_ERROR_BUSY;
	}
	/* Assign current task ID */
	p_status->task_waiting = task_get_current();
	interrupt_enable();

	/* Select port for multi-ports i2c controller */
	i2c_select_port(port);

	/* Copy data to controller struct */
	p_status->flags = flags;
	p_status->tx_buf = out;
	p_status->sz_txbuf = out_size;
	p_status->rx_buf = in;
	p_status->sz_rxbuf = in_size;
#if I2C_7BITS_ADDR
	/* Set slave address from 7-bits to 8-bits */
	p_status->slave_addr = (slave_addr<<1);
#else
	/* Set slave address (8-bits) */
	p_status->slave_addr = slave_addr;
#endif
	/* Reset index & error */
	p_status->idx_buf = 0;
	p_status->err_code = SMB_OK;

	/* Make sure we're in a good state to start */
	if ((flags & I2C_XFER_START) && (i2c_bus_busy(ctrl)
			|| (i2c_get_line_levels(port) != I2C_LINE_IDLE))) {
		/* Attempt to unwedge the controller. */
		i2c_unwedge(ctrl);
		/* recovery i2c controller */
		i2c_recovery(ctrl);
		/* Select port again for recovery */
		i2c_select_port(port);
	}

	CPUTS("\n");

	/* Start master transaction */
	i2c_master_transaction(ctrl);

	/* Reset task ID */
	p_status->task_waiting = TASK_ID_INVALID;

	/* Disable SMB interrupt and New Address Match interrupt source */
	i2c_interrupt(ctrl, 0);

	CPRINTS("-Err:0x%02x\n", p_status->err_code);

	return (p_status->err_code == SMB_OK) ? EC_SUCCESS : EC_ERROR_UNKNOWN;
}
/*! \cond */
//! Return the process ID of the calling task.
pid_t _getpid(){
	//Resolve the caller's task index into its owning process ID
	int tid = task_get_current();
	return (pid_t)task_get_pid( tid );
}
/*
 * Enable the transfer-complete interrupt for a DMA stream, routing the
 * completion through _dma_wake_callback with the caller's task ID as the
 * callback context.
 */
void dma_enable_tc_interrupt(enum dma_channel stream)
{
	void *wake_ctx = (void *)(int)task_get_current();

	dma_enable_tc_interrupt_callback(stream, _dma_wake_callback, wake_ctx);
}
/*! \details This function returns the thread ID of the caller.
 * \return The thread ID of the caller.
 */
pthread_t pthread_self(){
	//The scheduler's task index doubles as the POSIX thread ID
	return (pthread_t)task_get_current();
}
//! Record \a args as the mutex the current task is delaying on.
static void set_delay_mutex(void * args){
	int tid = task_get_current();
	sched_table[ tid ].signal_delay_mutex = args;
}
//! Transfer \a nbyte bytes between \a buf and the device behind \a open_file
//! (\a read selects direction). Drives the privileged transfer helper in a
//! retry loop: synchronous completion, asynchronous completion, no-data retry
//! (or ENODATA under O_NONBLOCK), signal interruption (EINTR), and start
//! failure are all handled. On success returns the byte count and advances
//! the file location for non-character devices; on error returns -1 with
//! errno set.
int device_data_transfer(open_file_t * open_file, void * buf, int nbyte, int read){
	int tmp;
	int mode;
	priv_device_data_transfer_t args;

	if ( nbyte == 0 ){
		return 0;
	}

	//Describe the operation for the privileged helper
	args.fs = (const sysfs_t*)open_file->fs;
	args.handle = (device_t *)open_file->handle;
	args.read = read;
	args.op.loc = open_file->loc;
	args.op.flags = open_file->flags;
	args.op.buf = buf;
	args.op.callback = priv_data_transfer_callback;
	args.op.context = &args;
#if SINGLE_TASK == 0
	args.op.tid = task_get_current();
#else
	args.op.tid = 0;
#endif

	if ( (mode = get_mode(args.fs, args.handle)) < 0 ){
		return -1;
	}

	//privilege call for the operation
	do {
#if SINGLE_TASK > 0
		waiting = false;
#endif

		args.op.nbyte = nbyte;
		//Sentinel: still this value afterwards means the privcall never ran
		args.ret = -101010;

		//This transfers the data
		hwpl_core_privcall(priv_device_data_transfer, &args);
		//We arrive here if the data is done transferring or there is no data to transfer and O_NONBLOCK is set
		//or if there was an error

		if( sched_get_unblock_type(task_get_current()) == SCHED_UNBLOCK_SIGNAL ){
			//A signal unblocked us: cancel the pending action
			unistd_clr_action(open_file);
			errno = EINTR;
			return -1;
		}

#if SINGLE_TASK != 0
		//Single-task build: poll until the callback clears the flag
		while ( waiting == true ){
			core_sleep(CORE_SLEEP);
		}
#endif

		if ( args.ret > 0 ){
			//The operation happened synchronously
			tmp = args.ret;
			break;
		} else if ( args.ret == 0 ){
			//the operation happened asynchronously
			if ( args.op.nbyte > 0 ){
				//The operation has completed and transferred args.op.nbyte bytes
				tmp = args.op.nbyte;
				break;
			} else if ( args.op.nbyte == 0 ){
				//There was no data to read/write -- try again
				if (args.op.flags & O_NONBLOCK ){
					errno = ENODATA;
					return -1;
				}
			} else if ( args.op.nbyte < 0 ){
				//there was an error executing the operation (or the operation was cancelled)
				return -1;
			}
		} else if ( args.ret < 0 ){
			//there was an error starting the operation (such as EAGAIN)
			if( args.ret == -101010 ){
				errno = ENXIO; //this is a rare/strange error where hwpl_core_privcall fails to run properly
			}
			return -1;
		}
	} while ( args.ret == 0 );

	//Character devices do not maintain a file offset
	if ( ((mode & S_IFMT) != S_IFCHR) && (tmp > 0) ){
		open_file->loc += tmp;
	}

	return tmp;
}
/**
 * Run one I2C master transfer on an it83xx port: recover a stuck bus if
 * needed, program the per-port descriptor, kick the transaction, and sleep
 * until the ISR signals idle or the port's timeout expires.
 *
 * @param port        I2C port index (standard or extended range)
 * @param slave_addr  slave address for the transaction
 * @param out         bytes to write; out_size may be 0
 * @param in          buffer for bytes to read; in_size may be 0
 * @param flags       I2C_XFER_* flags; START may be suppressed when a
 *                    continued transfer is in progress (pd->i2ccs)
 * @return 0 on success or the error recorded in pd->err
 */
int chip_i2c_xfer(int port, int slave_addr, const uint8_t *out, int out_size,
		uint8_t *in, int in_size, int flags)
{
	struct i2c_port_data *pd = pdata + port;
	uint32_t events = 0;

	if (out_size == 0 && in_size == 0)
		return EC_SUCCESS;

	/* A continued (chained) transfer must not re-issue a start */
	if (pd->i2ccs) {
		if ((flags & I2C_XFER_SINGLE) == I2C_XFER_SINGLE)
			flags &= ~I2C_XFER_START;
	}

	/* Copy data to port struct */
	pd->out = out;
	pd->out_size = out_size;
	pd->in = in;
	pd->in_size = in_size;
	pd->flags = flags;
	pd->widx = 0;
	pd->ridx = 0;
	pd->err = 0;
	pd->addr = slave_addr;

	if (port < I2C_STANDARD_PORT_COUNT) {
		/* Make sure we're in a good state to start */
		if ((flags & I2C_XFER_START) && (i2c_is_busy(port)
			|| (IT83XX_SMB_HOSTA(port) & HOSTA_ALL_WC_BIT)
			|| (i2c_get_line_levels(port) != I2C_LINE_IDLE))) {

			/* Attempt to unwedge the port. */
			i2c_unwedge(port);
			/* reset i2c port */
			i2c_reset(port, I2C_RC_NO_IDLE_FOR_START);
		}
	} else {
		/* Make sure we're in a good state to start */
		if ((flags & I2C_XFER_START) && (i2c_is_busy(port)
			|| (i2c_get_line_levels(port) != I2C_LINE_IDLE))) {

			/* Attempt to unwedge the port. */
			i2c_unwedge(port);
			/* reset i2c port */
			i2c_reset(port, I2C_RC_NO_IDLE_FOR_START);
		}
	}

	pd->task_waiting = task_get_current();
	if (pd->flags & I2C_XFER_START) {
		pd->i2ccs = I2C_CH_NORMAL;
		/* enable i2c interrupt */
		task_clear_pending_irq(i2c_ctrl_regs[port].irq);
		task_enable_irq(i2c_ctrl_regs[port].irq);
	}

	/* Start transaction */
	i2c_transaction(port);

	/* Wait for transfer complete or timeout */
	events = task_wait_event_mask(TASK_EVENT_I2C_IDLE, pd->timeout_us);

	/* disable i2c interrupt */
	task_disable_irq(i2c_ctrl_regs[port].irq);
	pd->task_waiting = TASK_ID_INVALID;

	/* Handle timeout */
	if (!(events & TASK_EVENT_I2C_IDLE)) {
		pd->err = EC_ERROR_TIMEOUT;
		/* reset i2c port */
		i2c_reset(port, I2C_RC_TIMEOUT);
	}

	/* reset i2c channel status */
	if (pd->err)
		pd->i2ccs = I2C_CH_NORMAL;

	return pd->err;
}
/**
 * Start a PECI transaction
 *
 * @param addr client address
 * @param w_len write length (no include [Cmd Code] and [AW FCS])
 * @param r_len read length (no include [FCS])
 * @param cmd_code command code
 * @param *w_buf How buffer pointer of write data
 * @param *r_buf How buffer pointer of read data
 * @param timeout_us transaction timeout unit:us
 *
 * @return zero if successful, non-zero if error
 */
static enum peci_status peci_transaction(uint8_t addr,
					uint8_t w_len,
					uint8_t r_len,
					enum peci_command_code cmd_code,
					uint8_t *w_buf,
					uint8_t *r_buf,
					int timeout_us)
{
	uint8_t status;
	int index;

	/*
	 * bit5, Both write and read data FIFO pointers will be cleared.
	 *
	 * bit4, This bit enables the PECI host to abort the transaction
	 *       when FCS error occurs.
	 *
	 * bit2, This bit enables the contention mechanism of the PECI bus.
	 *       When this bit is set, the host will abort the transaction
	 *       if the PECI bus is contentious.
	 */
	IT83XX_PECI_HOCTLR |= 0x34;

	/* This register is the target address field of the PECI protocol. */
	IT83XX_PECI_HOTRADDR = addr;

	/* This register is the write length field of the PECI protocol. */
	ASSERT(w_len <= PECI_WRITE_DATA_FIFO_SIZE);

	if (cmd_code == PECI_CMD_PING) {
		/* write length is 0 */
		IT83XX_PECI_HOWRLR = 0x00;
	} else {
		if ((cmd_code == PECI_CMD_WR_PKG_CFG) ||
			(cmd_code == PECI_CMD_WR_IAMSR) ||
			(cmd_code == PECI_CMD_WR_PCI_CFG) ||
			(cmd_code == PECI_CMD_WR_PCI_CFG_LOCAL)) {
			/* write length include Cmd Code + AW FCS */
			IT83XX_PECI_HOWRLR = w_len + 2;

			/* bit1, The bit enables the AW_FCS hardwired mechanism
			 * based on the PECI command. This bit is functional
			 * only when the AW_FCS supported command of
			 * PECI 2.0/3.0/3.1 is issued.
			 * When this bit is set, the hardware will handle the
			 * calculation of AW_FCS.
			 */
			IT83XX_PECI_HOCTLR |= 0x02;
		} else {
			/* write length include Cmd Code */
			IT83XX_PECI_HOWRLR = w_len + 1;
			IT83XX_PECI_HOCTLR &= ~0x02;
		}
	}

	/* This register is the read length field of the PECI protocol. */
	ASSERT(r_len <= PECI_READ_DATA_FIFO_SIZE);
	IT83XX_PECI_HORDLR = r_len;

	/* This register is the command field of the PECI protocol. */
	IT83XX_PECI_HOCMDR = cmd_code;

	/* The write data field of the PECI protocol. */
	for (index = 0x00; index < w_len; index++)
		IT83XX_PECI_HOWRDR = w_buf[index];

	/* Record the caller so the PECI ISR can wake it */
	peci_current_task = task_get_current();

#ifdef CONFIG_IT83XX_PECI_WITH_INTERRUPT
	task_clear_pending_irq(IT83XX_IRQ_PECI);
	task_enable_irq(IT83XX_IRQ_PECI);
	/* start */
	IT83XX_PECI_HOCTLR |= 0x01;

	/* pre-set timeout */
	index = timeout_us;
	/* index == 0 afterwards means the ISR fired before the timeout */
	if (task_wait_event(timeout_us) != TASK_EVENT_TIMER)
		index = 0;
	task_disable_irq(IT83XX_IRQ_PECI);
#else
	/* start */
	IT83XX_PECI_HOCTLR |= 0x01;
	/* Poll the status register in ~15us steps until any bit asserts */
	for (index = 0x00; index < timeout_us; index += 16) {
		if (IT83XX_PECI_HOSTAR & PECI_STATUS_ANY_BIT)
			break;
		udelay(15);
	}
#endif

	peci_current_task = TASK_ID_INVALID;

	if (index < timeout_us) {
		status = IT83XX_PECI_HOSTAR;
		/* any error */
		if (IT83XX_PECI_HOSTAR & PECI_STATUS_ANY_ERR) {
			if (IT83XX_PECI_HOSTAR & PECI_STATUS_ERR_NEED_RST)
				peci_reset();
		} else if (IT83XX_PECI_HOSTAR & PECI_STATUS_FINISH) {
			/* The read data field of the PECI protocol. */
			for (index = 0x00; index < r_len; index++)
				r_buf[index] = IT83XX_PECI_HORDDR;

			/* W/C */
			IT83XX_PECI_HOSTAR = PECI_STATUS_FINISH;
			status = IT83XX_PECI_HOSTAR;
		}
	} else {
		/* transaction timeout */
		status = PECI_STATUS_TIMEOUT;
	}

	/* Don't disable PECI host controller if controller already enable. */
	IT83XX_PECI_HOCTLR = 0x08;

	/* W/C */
	IT83XX_PECI_HOSTAR = PECI_STATUS_ANY_BIT;

	return status;
}