static int _arc_v2_irq_unit_suspend(struct device *dev) { u8_t irq; ARG_UNUSED(dev); /* Interrupts from 0 to 15 are exceptions and they are ignored * by IRQ auxiliary registers. For that reason we skip those * values in this loop. */ for (irq = 16; irq < CONFIG_NUM_IRQS; irq++) { _arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq); ctx.irq_config[irq - 16] = _arc_v2_aux_reg_read(_ARC_V2_IRQ_PRIORITY) << 2; ctx.irq_config[irq - 16] |= _arc_v2_aux_reg_read(_ARC_V2_IRQ_TRIGGER) << 1; ctx.irq_config[irq - 16] |= _arc_v2_aux_reg_read(_ARC_V2_IRQ_ENABLE); } ctx.irq_ctrl = _arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_CTRL); ctx.irq_vect_base = _arc_v2_aux_reg_read(_ARC_V2_IRQ_VECT_BASE); _arc_v2_irq_unit_device_power_state = DEVICE_PM_SUSPEND_STATE; return 0; }
/* Read an I2C controller register: 'reg' is an offset from the
 * block's auxiliary base address taken from the ROM config.
 */
static inline uint32_t _i2c_qse_ss_reg_read(struct device *dev, uint32_t reg)
{
	const struct i2c_qse_ss_rom_config *rom = dev->config->config_info;
	uint32_t reg_addr = rom->base_address + reg;

	return _arc_v2_aux_reg_read(reg_addr);
}
static int _arc_v2_irq_unit_resume(struct device *dev) { u8_t irq; u32_t status32; ARG_UNUSED(dev); /* Interrupts from 0 to 15 are exceptions and they are ignored * by IRQ auxiliary registers. For that reason we skip those * values in this loop. */ for (irq = 16; irq < CONFIG_NUM_IRQS; irq++) { _arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq); _arc_v2_aux_reg_write(_ARC_V2_IRQ_PRIORITY, ctx.irq_config[irq - 16] >> 2); _arc_v2_aux_reg_write(_ARC_V2_IRQ_TRIGGER, (ctx.irq_config[irq - 16] >> 1) & BIT(0)); _arc_v2_aux_reg_write(_ARC_V2_IRQ_ENABLE, ctx.irq_config[irq - 16] & BIT(0)); } _arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, ctx.irq_ctrl); _arc_v2_aux_reg_write(_ARC_V2_IRQ_VECT_BASE, ctx.irq_vect_base); status32 = _arc_v2_aux_reg_read(_ARC_V2_STATUS32); status32 |= _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL); __builtin_arc_kflag(status32); _arc_v2_irq_unit_device_power_state = DEVICE_PM_ACTIVE_STATE; return 0; }
/*
 * @brief Output a character to serial port
 *
 * Busy-waits until the transmitter is ready, then writes the
 * character to the port's data register.
 *
 * @param port port number
 * @param c character to output
 *
 * @return the character that was written
 */
unsigned char uart_poll_out(int port, unsigned char c)
{
	/* Spin until the TX holding register reports empty. */
	do {
	} while ((_arc_v2_aux_reg_read(STATUS_REG(port)) & TXEMPTY) == 0);

	_arc_v2_aux_reg_write(DATA_REG(port), c);
	return c;
}
/*
 * @brief Output a character to serial port
 *
 * Busy-waits until the transmitter is ready, then writes the
 * character to the device's data register.
 *
 * @param dev UART device struct
 * @param c character to output
 *
 * @return the character that was written
 */
unsigned char uart_nsim_poll_out(struct device *dev, unsigned char c)
{
	/* Spin until the TX holding register reports empty. */
	do {
	} while ((_arc_v2_aux_reg_read(STATUS_REG(dev)) & TXEMPTY) == 0);

	_arc_v2_aux_reg_write(DATA_REG(dev), c);
	return c;
}
/* Invalidate the data cache, if the core actually has one. */
static void invalidate_dcache(void)
{
	unsigned int version;

	/* Version field (low byte) of the D-cache build register is
	 * zero when no data cache is present.
	 */
	version = _arc_v2_aux_reg_read(_ARC_V2_D_CACHE_BUILD) & 0xff;
	if (version != 0) {
		_arc_v2_aux_reg_write(_ARC_V2_DC_IVDC, 1);
	}
}
/* Invalidate and then disable the instruction cache, if present. */
static void disable_icache(void)
{
	unsigned int version;

	/* Version field (low byte) of the I-cache build register is
	 * zero when no instruction cache is present.
	 */
	version = _arc_v2_aux_reg_read(_ARC_V2_I_CACHE_BUILD) & 0xff;
	if (version == 0) {
		return;
	}

	/* Invalidate, insert one nop between the aux writes, then
	 * disable the i-cache via its control register.
	 */
	_arc_v2_aux_reg_write(_ARC_V2_IC_IVIC, 0);
	__asm__ __volatile__ ("nop");
	_arc_v2_aux_reg_write(_ARC_V2_IC_CTRL, 1);
}
/* * @brief Read 64-bit timestamp value * * This function returns a 64-bit bit time stamp value that is clocked * at the same frequency as the CPU. * * @return 64-bit time stamp value */ u64_t _tsc_read(void) { unsigned int key; u64_t t; u32_t count; key = irq_lock(); t = (u64_t)_sys_clock_tick_count; count = _arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT); irq_unlock(key); t *= (u64_t)sys_clock_hw_cycles_per_tick; t += (u64_t)count; return t; }
/* Point the CPU's vector base register at the compiled-in vector
 * table when the two disagree.
 */
static void adjust_vector_table_base(void)
{
	extern struct vector_table _VectorTable;
	unsigned int current_base;

	/* The low 10 bits of VECT_BASE are not part of the address;
	 * mask them off before comparing against the linked table.
	 */
	current_base =
		_arc_v2_aux_reg_read(_ARC_V2_IRQ_VECT_BASE) & 0xfffffc00;
	if (current_base != (unsigned int)&_VectorTable) {
		_arc_v2_aux_reg_write(_ARC_V2_IRQ_VECT_BASE,
				      (unsigned int)&_VectorTable);
	}
}
/** * * @brief Kernel fatal error handler * * This routine is called when fatal error conditions are detected by software * and is responsible only for reporting the error. Once reported, it then * invokes the user provided routine _SysFatalErrorHandler() which is * responsible for implementing the error handling policy. * * The caller is expected to always provide a usable ESF. In the event that the * fatal error does not have a hardware generated ESF, the caller should either * create its own or use a pointer to the global default ESF <_default_esf>. * * @return This function does not return. */ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf) { switch (reason) { case _NANO_ERR_HW_EXCEPTION: break; #if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_ARC_STACK_CHECKING) case _NANO_ERR_STACK_CHK_FAIL: printk("***** Stack Check Fail! *****\n"); break; #endif case _NANO_ERR_ALLOCATION_FAIL: printk("**** Kernel Allocation Failure! ****\n"); break; case _NANO_ERR_KERNEL_OOPS: printk("***** Kernel OOPS! *****\n"); break; case _NANO_ERR_KERNEL_PANIC: printk("***** Kernel Panic! *****\n"); break; default: printk("**** Unknown Fatal Error %d! ****\n", reason); break; } printk("Current thread ID = %p\n" "Faulting instruction address = 0x%lx\n", k_current_get(), _arc_v2_aux_reg_read(_ARC_V2_ERET)); /* * Now that the error has been reported, call the user implemented * policy * to respond to the error. The decisions as to what responses are * appropriate to the various errors are something the customer must * decide. */ _SysFatalErrorHandler(reason, pEsf); for (;;) ; }
void dw_ss_adc_err_ISR_proc(struct device *dev) { struct adc_config *config = dev->config->config_info; struct adc_info *info = dev->driver_data; uint32_t adc_base = config->reg_base; uint32_t reg_val = _arc_v2_aux_reg_read(adc_base + ADC_SET); _arc_v2_aux_reg_write( adc_base + ADC_CTRL, ADC_INT_DSB|ADC_CLK_ENABLE|ADC_SEQ_PTR_RST); _arc_v2_aux_reg_write( adc_base + ADC_CTRL, reg_val | ADC_FLUSH_RX); info->state = ADC_STATE_IDLE; _arc_v2_aux_reg_write( adc_base +ADC_CTRL, ADC_INT_DSB|ADC_CLK_ENABLE|ADC_CLR_OVERFLOW|ADC_CLR_UNDRFLOW); if (likely( NULL != info->err_cb)) { info->err_cb( dev ); } }
void _sys_soc_power_state_post_ops(enum power_states state) { u32_t limit; switch (state) { case SYS_POWER_STATE_CPU_LPS_1: /* Expire the timer as it is disabled in SS2. */ limit = _arc_v2_aux_reg_read(_ARC_V2_TMR0_LIMIT); _arc_v2_aux_reg_write(_ARC_V2_TMR0_COUNT, limit - 1); case SYS_POWER_STATE_CPU_LPS: __builtin_arc_seti(0); break; case SYS_POWER_STATE_DEEP_SLEEP: qm_ss_power_soc_lpss_disable(); /* If flag is cleared it means the system entered in * sleep state while we were in LPS. In that case, we * must set ARC_READY flag so x86 core can continue * its execution. */ if ((QM_SCSS_GP->gp0 & GP0_BIT_SLEEP_READY) == 0) { _quark_se_ss_ready(); __builtin_arc_seti(0); } else { QM_SCSS_GP->gp0 &= ~GP0_BIT_SLEEP_READY; QM_SCSS_GP->gps0 &= ~QM_GPS0_BIT_SENSOR_WAKEUP; } break; case SYS_POWER_STATE_DEEP_SLEEP_1: case SYS_POWER_STATE_DEEP_SLEEP_2: /* Route RTC interrupt to the current core */ QM_IR_UNMASK_INTERRUPTS(QM_INTERRUPT_ROUTER->rtc_0_int_mask); __builtin_arc_seti(0); break; break; default: break; } }
/**
 *
 * @brief Get contents of Timer0 count register
 *
 * @return Current Timer0 count
 */
static ALWAYS_INLINE u32_t timer0_count_register_get(void)
{
	u32_t count = _arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);

	return count;
}
/**
 *
 * @brief Get contents of Timer0 limit register
 *
 * @return Current Timer0 limit (doc fix: previously said "N/A" even
 * though the function returns the register value)
 */
static ALWAYS_INLINE u32_t timer0_limit_register_get(void)
{
	return _arc_v2_aux_reg_read(_ARC_V2_TMR0_LIMIT);
}
/**
 *
 * @brief Get contents of Timer0 control register
 *
 * @return Current Timer0 control register value (doc fix: previously
 * said "N/A" even though the function returns the register value)
 */
static ALWAYS_INLINE u32_t timer0_control_register_get(void)
{
	return _arc_v2_aux_reg_read(_ARC_V2_TMR0_CONTROL);
}
/* RX-FIFO interrupt handler for the DesignWare Sensor Subsystem ADC.
 * Drains samples from the FIFO into the current RX buffer, advances
 * the ring of buffers (repetitive mode) or shuts the sequencer down
 * (single-shot mode), and always clears the data-available condition
 * before returning.
 */
void dw_ss_adc_rx_ISR_proc(struct device *dev)
{
	struct adc_config *config;
	struct device_config *dev_config;
	struct adc_info *info;
	uint32_t adc_base;
	uint32_t i, reg_val, rx_cnt;
	uint32_t rd = 0;
	uint32_t idx;

	dev_config = dev->config;
	config = dev_config->config_info;
	info = dev->driver_data;
	adc_base = config->reg_base;
	idx = info->index;

	/* Repetitive mode drains one FIFO-threshold batch per interrupt;
	 * single-shot mode drains the whole configured sequence.
	 */
	if (IO_ADC_SEQ_MODE_REPETITIVE == config->seq_mode) {
		/* No buffer available for this slot: just clear the
		 * interrupt condition and return.
		 */
		if (NULL == info->rx_buf[idx]) {
			goto cli;
		}
		rx_cnt = (config->fifo_tld + 1);
	} else {
		rx_cnt = info->seq_size;
	}

	/* Never read more samples than the caller's buffer can hold. */
	if (rx_cnt > info->rx_len) {
		rx_cnt = info->rx_len;
	}

	/* Pop each sample: setting ADC_POP_SAMPLE in ADC_SET presumably
	 * advances the FIFO so ADC_SAMPLE holds the next value — TODO
	 * confirm against the DW ADC register spec.
	 */
	for (i = 0; i < rx_cnt; i++) {
		reg_val = _arc_v2_aux_reg_read(adc_base + ADC_SET);
		_arc_v2_aux_reg_write(adc_base + ADC_SET,
				      reg_val | ADC_POP_SAMPLE);
		rd = _arc_v2_aux_reg_read(adc_base + ADC_SAMPLE);
		info->rx_buf[idx][i] = rd;
	}

	/* Advance the buffer cursor past the samples just stored. */
	info->rx_buf[idx] += i;
	info->rx_len -= i;

	if (0 == info->rx_len) {
		/* Buffer filled: notify the client. */
		if (likely(NULL != info->rx_cb)) {
			info->rx_cb(dev);
		}
		if (IO_ADC_SEQ_MODE_SINGLESHOT == config->seq_mode) {
			/* Single-shot complete: mask interrupts, reset the
			 * sequencer, flush the RX FIFO, and go idle.
			 */
			_arc_v2_aux_reg_write(adc_base + ADC_CTRL,
					      ADC_INT_DSB | ADC_CLK_ENABLE |
					      ADC_SEQ_PTR_RST);
			reg_val = _arc_v2_aux_reg_read(adc_base + ADC_SET);
			_arc_v2_aux_reg_write(adc_base + ADC_SET,
					      reg_val | ADC_FLUSH_RX);
			info->state = ADC_STATE_IDLE;
			goto cli;
		}
		/* Repetitive mode: release this slot and move to the next
		 * buffer in the ring.
		 */
		info->rx_buf[idx] = NULL;
		idx++;
		idx %= BUFS_NUM;
		info->index = idx;
	} else if (IO_ADC_SEQ_MODE_SINGLESHOT == config->seq_mode) {
		/* Buffer not yet full but the single-shot sequence is done:
		 * reset the sequencer and report what was collected.
		 */
		_arc_v2_aux_reg_write(adc_base + ADC_CTRL,
				      ADC_INT_DSB | ADC_CLK_ENABLE |
				      ADC_SEQ_PTR_RST);
		info->state = ADC_STATE_IDLE;
		if (likely(NULL != info->rx_cb)) {
			info->rx_cb(dev);
		}
	}

cli:
	/* Clear the data-available condition so the line deasserts. */
	reg_val = _arc_v2_aux_reg_read(adc_base + ADC_CTRL);
	_arc_v2_aux_reg_write(adc_base + ADC_CTRL,
			      reg_val | ADC_CLR_DATA_A);
}
/* Return the trigger setting of the given interrupt line, as read
 * from the IRQ_TRIGGER auxiliary register after selecting the line.
 */
unsigned int _arc_v2_irq_unit_trigger_get(int irq)
{
	unsigned int trigger;

	_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
	trigger = _arc_v2_aux_reg_read(_ARC_V2_IRQ_TRIGGER);

	return trigger;
}