/**
 *
 * @brief Initialize the MVIC IO APIC and local APIC register sets.
 *
 * This routine initializes the Quark D2000 Interrupt Controller (MVIC).
 * This routine replaces the standard Local APIC / IO APIC init routines.
 *
 * @return 0 on success
 */
static int _mvic_init(struct device *unused)
{
        ARG_UNUSED(unused);
        int i;

        /* By default mask all interrupt lines */
        for (i = 0; i < MVIC_NUM_RTES; i++) {
                _mvic_rte_set(i, MVIC_IOWIN_MASK);
        }

        /* Reset the task priority and timer initial count registers */
        sys_write32(0, MVIC_TPR);
        sys_write32(0, MVIC_ICR);

        /* Initialize and mask the timer interrupt.
         * Bits 0-3 program the interrupt line number we will use
         * for the timer interrupt.
         */
        __ASSERT(CONFIG_MVIC_TIMER_IRQ < 16,
                 "Bad irq line %d chosen for timer irq", CONFIG_MVIC_TIMER_IRQ);
        sys_write32(MVIC_LVTTIMER_MASK | CONFIG_MVIC_TIMER_IRQ, MVIC_LVTTIMER);

        /* Discard a pending interrupt, if any */
        sys_write32(0, MVIC_EOI);

        return 0;
}
int pinmux_initialize(struct device *port)
{
        int i = 0;

        quark_se_pinmux_initialize_common(port, mux_config);

        PIN_CONFIG(mux_config, 0, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 1, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 2, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 3, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 8, PINMUX_FUNC_C);
        PIN_CONFIG(mux_config, 9, PINMUX_FUNC_C);
        PIN_CONFIG(mux_config, 55, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 56, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 57, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 63, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 64, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 65, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 66, PINMUX_FUNC_B);

        for (i = 0; i < PINMUX_MAX_REGISTERS; i++) {
                sys_write32(mux_config[i],
                            PINMUX_SELECT_REGISTER(board_pmux.base_address, i));
        }

        return DEV_OK;
}
/**
 *
 * @brief Write to a 32-bit MVIC IO APIC register.
 *
 * @param irq INTIN number
 * @param value value to be written
 *
 * @returns N/A
 */
static void _mvic_rte_set(unsigned int irq, u32_t value)
{
        int key;        /* interrupt lock level */
        u32_t regsel;

        __ASSERT(!(value & ~MVIC_IOWIN_SUPPORTED_BITS_MASK),
                 "invalid IRQ flags %" PRIx32 " for irq %d", value, irq);

        regsel = compute_ioregsel(irq);

        /* Lock interrupts to ensure indirect addressing works "atomically" */
        key = irq_lock();

        sys_write32(regsel, MVIC_IOREGSEL);
        sys_write32(value, MVIC_IOWIN);

        irq_unlock(key);
}
void _arch_irq_disable(unsigned int irq)
{
        if (irq == CONFIG_MVIC_TIMER_IRQ) {
                sys_write32(sys_read32(MVIC_LVTTIMER) | MVIC_LVTTIMER_MASK,
                            MVIC_LVTTIMER);
        } else {
                _mvic_rte_update(irq, MVIC_IOWIN_MASK, MVIC_IOWIN_MASK);
        }
}
/**
 * @brief SPI module data pull (read) operation.
 * @param dev Pointer to the device structure for the driver instance
 * @return None.
 */
static void spi_k64_pull_data(struct device *dev)
{
        struct spi_k64_config *info = dev->config->config_info;
        struct spi_k64_data *spi_data = dev->driver_data;
        uint16_t data;
#ifdef CONFIG_SPI_DEBUG
        uint32_t cnt = 0;       /* # of bytes pulled */
#endif

        DBG("spi_k64_pull_data - ");

        do {    /* initial status already checked by spi_k64_isr() */

                if (spi_data->rx_buf && spi_data->rx_buf_len > 0) {

                        data = (uint16_t)sys_read32(info->regs +
                                                    SPI_K64_REG_POPR);

                        if (spi_data->frame_sz > CHAR_BIT) {
                                /* store 2nd byte with frame sizes larger than 8 bits */
                                *((uint16_t *)(spi_data->rx_buf)) = data;
                                spi_data->rx_buf += 2;
                                spi_data->rx_buf_len -= 2;
#ifdef CONFIG_SPI_DEBUG
                                cnt += 2;
#endif
                        } else {
                                *(spi_data->rx_buf) = (uint8_t)data;
                                spi_data->rx_buf++;
                                spi_data->rx_buf_len--;
#ifdef CONFIG_SPI_DEBUG
                                cnt++;
#endif
                        }

                        /* Clear interrupt */
                        sys_write32(SPI_K64_SR_RFDF,
                                    (info->regs + SPI_K64_REG_SR));

                } else {
                        /* No buffer to store data to */
                        break;
                }

        } while (sys_read32(info->regs + SPI_K64_REG_SR) & SPI_K64_SR_RFDF);

        DBG("pulled: %d\n", cnt);
}
/**
 *
 * @brief Modify an interrupt line register.
 *
 * @param irq INTIN number
 * @param value value to be written
 * @param mask mask of the bits to be modified
 *
 * @returns N/A
 */
static void _mvic_rte_update(unsigned int irq, u32_t value, u32_t mask)
{
        int key;
        u32_t regsel, old_value, updated_value;

        __ASSERT(!(value & ~MVIC_IOWIN_SUPPORTED_BITS_MASK),
                 "invalid IRQ flags %" PRIx32 " for irq %d", value, irq);

        regsel = compute_ioregsel(irq);

        key = irq_lock();

        sys_write32(regsel, MVIC_IOREGSEL);
        old_value = sys_read32(MVIC_IOWIN);
        updated_value = (old_value & ~mask) | (value & mask);
        sys_write32(updated_value, MVIC_IOWIN);

        irq_unlock(key);
}
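/*
 * Hedged sketch (not shown in this source): the matching enable path would
 * mirror _arch_irq_disable() above, clearing the mask bit through the same
 * read-modify-write helper. The function name and body are assumptions.
 */
void _arch_irq_enable(unsigned int irq)
{
        if (irq == CONFIG_MVIC_TIMER_IRQ) {
                /* Clear the LVT timer mask bit to unmask the timer interrupt */
                sys_write32(sys_read32(MVIC_LVTTIMER) & ~MVIC_LVTTIMER_MASK,
                            MVIC_LVTTIMER);
        } else {
                /* Clear only the mask bit of this interrupt line's RTE */
                _mvic_rte_update(irq, 0, MVIC_IOWIN_MASK);
        }
}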
static inline int quark_se_clock_control_off(struct device *dev,
                                             clock_control_subsys_t sub_system)
{
        struct quark_se_clock_control_config *info = dev->config->config_info;
        uint32_t subsys = POINTER_TO_INT(sub_system);

        if (sub_system == CLOCK_CONTROL_SUBSYS_ALL) {
                DBG("Disabling all clock gates on dev %p\n", dev);
                sys_write32(0x00000000, info->base_address);
                return DEV_OK;
        }

        DBG("clock gate on dev %p subsystem %u\n", dev, subsys);
        return sys_test_and_clear_bit(info->base_address, subsys);
}
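/*
 * Hedged usage sketch (assumption, not from this source): gate off a single
 * subsystem clock by its bit index. The bit index 3 and the clk_dev pointer
 * are placeholders, and UINT_TO_POINTER is assumed to be the inverse of the
 * POINTER_TO_INT conversion used above.
 */
static void example_gate_off_one_subsys(struct device *clk_dev)
{
        quark_se_clock_control_off(clk_dev, UINT_TO_POINTER(3));
}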
static void _pinmux_defaults(u32_t base)
{
        u32_t mux_config[PINMUX_MAX_REGISTERS] = { 0, 0, 0, 0, 0 };
        int i = 0;

#if !defined(CONFIG_SPI_1) && !defined(CONFIG_SPI_CS_GPIO)
        PIN_CONFIG(mux_config, 0, PINMUX_FUNC_B);
#endif
        PIN_CONFIG(mux_config, 1, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 2, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 3, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 4, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 5, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 7, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 8, PINMUX_FUNC_C);
        PIN_CONFIG(mux_config, 9, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 14, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 16, PINMUX_FUNC_C);
        PIN_CONFIG(mux_config, 17, PINMUX_FUNC_C);
        PIN_CONFIG(mux_config, 40, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 41, PINMUX_FUNC_B);
#ifdef CONFIG_SPI_1
        PIN_CONFIG(mux_config, 42, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 43, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 44, PINMUX_FUNC_B);
#ifndef CONFIG_SPI_CS_GPIO
        PIN_CONFIG(mux_config, 45, PINMUX_FUNC_B);
#endif
#endif
        PIN_CONFIG(mux_config, 55, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 56, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 57, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 63, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 64, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 65, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 66, PINMUX_FUNC_B);

        for (i = 0; i < PINMUX_MAX_REGISTERS; i++) {
                sys_write32(mux_config[i], PINMUX_SELECT_REGISTER(base, i));
        }
}
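/*
 * Hedged sketch (assumption, not from this source): PIN_CONFIG() appears to
 * pack a 2-bit mux function per pin, 16 pins per 32-bit select register,
 * which is consistent with the 5-word array covering pins 0-66 above and the
 * 2-word array covering pins 0-18 below. A plausible equivalent, under a
 * hypothetical name to avoid clashing with the real macro:
 */
#define PIN_CONFIG_SKETCH(mux_arr, pin, func) \
        ((mux_arr)[(pin) / 16] |= ((u32_t)(func) << (((pin) % 16) * 2)))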
static void _pinmux_defaults(u32_t base)
{
        u32_t mux_config[PINMUX_MAX_REGISTERS] = { 0, 0 };
        int i = 0;

        PIN_CONFIG(mux_config, 0, PINMUX_FUNC_C);
        PIN_CONFIG(mux_config, 3, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 4, PINMUX_FUNC_B);
        PIN_CONFIG(mux_config, 6, PINMUX_FUNC_C);
        PIN_CONFIG(mux_config, 7, PINMUX_FUNC_C);
        PIN_CONFIG(mux_config, 12, PINMUX_FUNC_C);
        PIN_CONFIG(mux_config, 13, PINMUX_FUNC_C);
        PIN_CONFIG(mux_config, 14, PINMUX_FUNC_C);
        PIN_CONFIG(mux_config, 15, PINMUX_FUNC_C);
        PIN_CONFIG(mux_config, 16, PINMUX_FUNC_C);
        PIN_CONFIG(mux_config, 17, PINMUX_FUNC_C);
        PIN_CONFIG(mux_config, 18, PINMUX_FUNC_C);

        for (i = 0; i < PINMUX_MAX_REGISTERS; i++) {
                sys_write32(mux_config[i], PINMUX_SELECT_REGISTER(base, i));
        }
}
int quark_se_ipm_controller_initialize(struct device *d)
{
        struct quark_se_ipm_controller_config_info *config =
                d->config->config_info;
#if CONFIG_IPM_QUARK_SE_MASTER
        int i;

        /* Mask all mailbox interrupts, we'll enable them
         * individually later. Clear out any pending messages.
         */
        sys_write32(0xFFFFFFFF, QUARK_SE_IPM_MASK);
        for (i = 0; i < QUARK_SE_IPM_CHANNELS; ++i) {
                volatile struct quark_se_ipm *ipm = QUARK_SE_IPM(i);

                ipm->sts.sts = 0;
                ipm->sts.irq = 0;
        }
#endif

        if (config->controller_init) {
                return config->controller_init();
        }
        return 0;
}
static inline void gpio_dw_unmask_int(uint32_t mask_addr)
{
        sys_write32(sys_read32(mask_addr) & INT_UNMASK_IA, mask_addr);
}
/**
 * @brief Configure the SPI host controller for operating against slaves
 * @param dev Pointer to the device structure for the driver instance
 * @param config Pointer to the application provided configuration
 *
 * @return DEV_OK if successful, another DEV_* code otherwise.
 */
static int spi_k64_configure(struct device *dev, struct spi_config *config)
{
        struct spi_k64_config *info = dev->config->config_info;
        struct spi_k64_data *spi_data = dev->driver_data;
        uint32_t flags = config->config;
        uint32_t mcr;           /* mode configuration attributes, for MCR */
        uint32_t ctar = 0;      /* clocking and timing attributes, for CTAR */
        uint32_t frame_sz;      /* frame size, in bits */

        DBG("spi_k64_configure: dev %p (regs @ 0x%x), ", dev, info->regs);
        DBG("config 0x%x, freq 0x%x", config->config, config->max_sys_freq);

        /* Disable transfer operations during configuration */
        spi_k64_halt(dev);

        /*
         * Set the common configuration:
         * Master mode, normal SPI transfers, PCS strobe disabled,
         * Rx overflow data ignored, PCSx inactive low signal, Doze disabled,
         * Rx/Tx FIFOs enabled.
         *
         * Also, keep transfers disabled.
         */
        mcr = SPI_K64_MCR_MSTR | SPI_K64_MCR_HALT;

        /* Set PCSx signal polarities and continuous SCK, as requested */
        mcr |= (SPI_K64_MCR_PCSIS_SET(SPI_PCS_POL_GET(flags)) |
                SPI_K64_MCR_CONT_SCKE_SET(SPI_CONT_SCK_GET(flags)));

        sys_write32(mcr, (info->regs + SPI_K64_REG_MCR));

        /* Set clocking and timing parameters */

        /* SCK polarity and phase, and bit order of data */

        if (flags & SPI_MODE_CPOL) {
                ctar |= SPI_K64_CTAR_CPOL;
        }

        if (flags & SPI_MODE_CPHA) {
                ctar |= SPI_K64_CTAR_CPHA;
        }

        if (flags & SPI_TRANSFER_MASK) {
                ctar |= SPI_K64_CTAR_LSBFE;
        }

        /*
         * Frame size is limited to 16 bits (vs. 8 bit value in
         * struct spi_config), programmed as: (frame_size - 1)
         */
        frame_sz = SPI_WORD_SIZE_GET(flags);
        if (frame_sz > SPI_K64_WORD_SIZE_MAX) {
                return DEV_INVALID_OP;
        }

        spi_data->frame_sz = frame_sz;

        ctar |= (SPI_K64_CTAR_FRMSZ_SET(frame_sz - 1));

        /* Set baud rate and signal timing parameters (delays) */

        if (spi_k64_set_baud_rate(config->max_sys_freq, &ctar) == 0) {
                return DEV_INVALID_OP;
        }

        /*
         * Set signal timing parameters (delays):
         * - PCS to SCK delay is set to the minimum,
         *   CTAR[PCSSCK] = CTAR[CSSCK] = 0;
         * - After SCK delay is set to at least half of the baud rate period,
         *   (using the combination of CTAR[PASC] and CTAR[ASC]); and
         * - Delay after transfer is set to the minimum,
         *   CTAR[PDT] = CTAR[DT] = 0.
         */
        if (spi_k64_set_delay(DELAY_AFTER_SCK,
                              (NSEC_PER_SEC / 2) / config->max_sys_freq,
                              &ctar) == 0) {
                return DEV_INVALID_OP;
        }

        DBG("spi_k64_configure: MCR: 0x%x CTAR0: 0x%x\n", mcr, ctar);

        sys_write32(ctar, (info->regs + SPI_K64_REG_CTAR0));

        /* Initialize Tx/Rx parameters */
        spi_data->tx_buf = spi_data->rx_buf = NULL;
        spi_data->tx_buf_len = spi_data->rx_buf_len = 0;

        /* Store continuous slave/PCS signal selection mode */
        spi_data->cont_pcs_sel = SPI_CONT_PCS_GET(flags);

        return DEV_OK;
}
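/*
 * Hedged usage sketch (not from this source): request SPI mode 3, 8-bit
 * frames, and a 4 MHz SCK. The spi_dev pointer is a placeholder, max_sys_freq
 * is assumed to be the desired SCK frequency in Hz, and SPI_WORD() is assumed
 * to be the word-size encoding counterpart of the SPI_WORD_SIZE_GET() helper
 * used by the driver above.
 */
static int example_configure_mode3(struct device *spi_dev)
{
        struct spi_config cfg = {
                .config = SPI_MODE_CPOL | SPI_MODE_CPHA | SPI_WORD(8),
                .max_sys_freq = 4000000,
        };

        return spi_k64_configure(spi_dev, &cfg);
}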
static inline void _i2c_qse_ss_memory_write(uint32_t base_addr,
                                            uint32_t offset, uint32_t val)
{
        sys_write32(val, base_addr + offset);
}
static inline void eth_write(uint32_t base_addr, uint32_t offset,
                             uint32_t val)
{
        sys_write32(val, base_addr + offset);
}
static void dma_stm32_write(struct dma_stm32_device *ddata,
                            u32_t reg, u32_t val)
{
        sys_write32(val, ddata->base + reg);
}
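/*
 * Hedged counterpart sketch (assumed, not shown in this source): the matching
 * register-read helper would use sys_read32() against the same base/offset
 * scheme.
 */
static u32_t dma_stm32_read(struct dma_stm32_device *ddata, u32_t reg)
{
        return sys_read32(ddata->base + reg);
}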
/**
 * @brief Read and/or write a defined amount of data through an SPI driver
 *
 * @param dev Pointer to the device structure for the driver instance
 * @param tx_buf Memory buffer that data should be transferred from
 * @param tx_buf_len Size of the memory buffer available for reading from
 * @param rx_buf Memory buffer that data should be transferred to
 * @param rx_buf_len Size of the memory buffer available for writing to
 *
 * @return DEV_OK if successful, another DEV_* code otherwise.
 */
static int spi_k64_transceive(struct device *dev,
                              uint8_t *tx_buf, uint32_t tx_buf_len,
                              uint8_t *rx_buf, uint32_t rx_buf_len)
{
        struct spi_k64_config *info = dev->config->config_info;
        struct spi_k64_data *spi_data = dev->driver_data;
        uint32_t int_config;    /* interrupt configuration */

        DBG("spi_k64_transceive: dev %p, Tx buf %p, ", dev, tx_buf);
        DBG("Tx len %u, Rx buf %p, Rx len %u\n",
            tx_buf_len, rx_buf, rx_buf_len);

        /* Check parameters */
        if ((tx_buf_len && (tx_buf == NULL)) ||
            (rx_buf_len && (rx_buf == NULL))) {
                DBG("spi_k64_transceive: ERROR - NULL buffer\n");
                return DEV_INVALID_OP;
        }

        /* Check Tx FIFO status */
        if (tx_buf_len &&
            ((sys_read32(info->regs + SPI_K64_REG_SR) &
              SPI_K64_SR_TFFF) == 0)) {
                DBG("spi_k64_transceive: Tx FIFO is full\n");
                return DEV_USED;
        }

        /* Set buffers info */
        spi_data->tx_buf = tx_buf;
        spi_data->tx_buf_len = tx_buf_len;
        spi_data->rx_buf = rx_buf;
        spi_data->rx_buf_len = rx_buf_len;

        /* Enable transfer operations - must be done before enabling interrupts */
        spi_k64_start(dev);

        /*
         * Enable interrupts:
         * - Transmit FIFO Fill (Tx FIFO not full); and/or
         * - Receive FIFO Drain (Rx FIFO not empty);
         *
         * Note: DMA requests are not supported.
         */
        int_config = sys_read32(info->regs + SPI_K64_REG_RSER);

        if (tx_buf_len) {
                int_config |= SPI_K64_RSER_TFFF_RE;
        }

        if (rx_buf_len) {
                int_config |= SPI_K64_RSER_RFDF_RE;
        }

        sys_write32(int_config, (info->regs + SPI_K64_REG_RSER));

        /* Wait for the transfer to complete */
        device_sync_call_wait(&spi_data->sync_info);

        /* Check completion status */
        if (spi_data->error) {
                spi_data->error = 0;
                return DEV_FAIL;
        }

        return DEV_OK;
}
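/*
 * Hedged usage sketch (not from this source): full-duplex transfer of a
 * 4-byte command while capturing 4 bytes of response. spi_dev is assumed to
 * be an already-configured instance of this driver, and the command bytes
 * are placeholders.
 */
static int example_transfer(struct device *spi_dev)
{
        uint8_t tx[4] = { 0x9f, 0x00, 0x00, 0x00 };
        uint8_t rx[4];

        return spi_k64_transceive(spi_dev, tx, sizeof(tx), rx, sizeof(rx));
}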
int spi_k64_init(struct device *dev)
{
        struct spi_k64_config *info = dev->config->config_info;
        struct spi_k64_data *data = dev->driver_data;
        uint32_t mcr;

        dev->driver_api = &k64_spi_api;

        /* Enable module clocking */
        sys_set_bit(info->clk_gate_reg, info->clk_gate_bit);

        /*
         * Ensure the module is enabled (MCR[MDIS] cleared) but transfers are
         * halted (MCR[HALT] set) before writing anything more to the
         * registers.
         */
        DBG("halt\n");
        mcr = SPI_K64_MCR_HALT;
        sys_write32(mcr, (info->regs + SPI_K64_REG_MCR));

        while (sys_read32(info->regs + SPI_K64_REG_SR) & SPI_K64_SR_TXRXS) {
                DBG("SPI Controller dev %p is running. Waiting for Halt.\n",
                    dev);
        }

        /* Clear Tx and Rx FIFOs */
        mcr |= (SPI_K64_MCR_CLR_RXF | SPI_K64_MCR_CLR_TXF);
        DBG("fifo clr\n");
        sys_write32(mcr, (info->regs + SPI_K64_REG_MCR));

        /* Set master mode */
        mcr = SPI_K64_MCR_MSTR | SPI_K64_MCR_HALT;
        DBG("master mode\n");
        sys_write32(mcr, (info->regs + SPI_K64_REG_MCR));

        /* Disable SPI module interrupt generation */
        DBG("irq disable\n");
        sys_write32(0, (info->regs + SPI_K64_REG_RSER));

        /* Clear status */
        DBG("status clr\n");
        sys_write32((SPI_K64_SR_RFDF | SPI_K64_SR_RFOF | SPI_K64_SR_TFUF |
                     SPI_K64_SR_EOQF | SPI_K64_SR_TCF),
                    (info->regs + SPI_K64_REG_SR));

        /* Set up the synchronous call mechanism */
        device_sync_call_init(&data->sync_info);

        /* Configure and enable SPI module IRQs */
        info->config_func();
        irq_enable(info->irq);

        /*
         * Enable Rx overflow interrupt generation.
         * Note that Tx underflow is only generated when in slave mode.
         */
        DBG("rxfifo overflow enable\n");
        sys_write32(SPI_K64_RSER_RFOF_RE, (info->regs + SPI_K64_REG_RSER));

        DBG("K64 SPI Driver initialized on device: %p\n", dev);

        /* Operation remains disabled (MCR[HALT] = 1) */

        return DEV_OK;
}
/**
 * @brief Complete SPI module data transfer operations.
 * @param dev Pointer to the device structure for the driver instance
 * @param error Error condition (0 = no error, otherwise an error occurred)
 * @return None.
 */
static void spi_k64_complete(struct device *dev, uint32_t error)
{
        struct spi_k64_data *spi_data = dev->driver_data;
        struct spi_k64_config *info = dev->config->config_info;
        uint32_t int_config;    /* interrupt configuration */

        if (error) {
                DBG("spi_k64_complete - ERROR condition\n");
                goto complete;
        }

        /* Check for a completed transfer */

        if (spi_data->tx_buf && (spi_data->tx_buf_len == 0) &&
            !spi_data->rx_buf) {
                /* disable Tx interrupts */
                int_config = sys_read32(info->regs + SPI_K64_REG_RSER);
                int_config &= ~SPI_K64_RSER_TFFF_RE;
                sys_write32(int_config, (info->regs + SPI_K64_REG_RSER));
        } else if (spi_data->rx_buf && (spi_data->rx_buf_len == 0) &&
                   !spi_data->tx_buf) {
                /* disable Rx interrupts */
                int_config = sys_read32(info->regs + SPI_K64_REG_RSER);
                int_config &= ~SPI_K64_RSER_RFDF_RE;
                sys_write32(int_config, (info->regs + SPI_K64_REG_RSER));
        } else if (spi_data->tx_buf && (spi_data->tx_buf_len == 0) &&
                   spi_data->rx_buf && (spi_data->rx_buf_len == 0)) {
                /* disable Tx, Rx interrupts */
                int_config = sys_read32(info->regs + SPI_K64_REG_RSER);
                int_config &= ~(SPI_K64_RSER_TFFF_RE | SPI_K64_RSER_RFDF_RE);
                sys_write32(int_config, (info->regs + SPI_K64_REG_RSER));
        } else {
                return;
        }

complete:
        spi_data->tx_buf = spi_data->rx_buf = NULL;
        spi_data->tx_buf_len = spi_data->rx_buf_len = 0;

        /* Disable transfer operations */
        spi_k64_halt(dev);

        /* Save status */
        spi_data->error = error;

        /* Signal completion */
        device_sync_call_complete(&spi_data->sync_info);
}
/**
 * @brief SPI module data push (write) operation.
 * @param dev Pointer to the device structure for the driver instance
 * @return None.
 */
static void spi_k64_push_data(struct device *dev)
{
        struct spi_k64_config *info = dev->config->config_info;
        struct spi_k64_data *spi_data = dev->driver_data;
        uint32_t data;
#ifdef CONFIG_SPI_DEBUG
        uint32_t cnt = 0;       /* # of bytes pushed */
#endif

        DBG("spi_k64_push_data - ");

        do {    /* initial status already checked by spi_k64_isr() */

                if (spi_data->tx_buf && (spi_data->tx_buf_len > 0)) {

                        if (spi_data->frame_sz > CHAR_BIT) {
                                /* get 2nd byte with frame sizes larger than 8 bits */
                                data = (uint32_t)(*(uint16_t *)(spi_data->tx_buf));
                                spi_data->tx_buf += 2;
                                spi_data->tx_buf_len -= 2;
#ifdef CONFIG_SPI_DEBUG
                                cnt += 2;
#endif
                        } else {
                                data = (uint32_t)(*(spi_data->tx_buf));
                                spi_data->tx_buf++;
                                spi_data->tx_buf_len--;
#ifdef CONFIG_SPI_DEBUG
                                cnt++;
#endif
                        }

                        /* Write data to the selected slave */
                        if (spi_data->cont_pcs_sel &&
                            (spi_data->tx_buf_len == 0)) {
                                /* clear continuous PCS enabling in the last frame */
                                sys_write32((data |
                                             SPI_K64_PUSHR_PCS_SET(spi_data->pcs)),
                                            (info->regs + SPI_K64_REG_PUSHR));
                        } else {
                                sys_write32((data |
                                             SPI_K64_PUSHR_PCS_SET(spi_data->pcs) |
                                             SPI_K64_PUSHR_CONT_SET(spi_data->cont_pcs_sel)),
                                            (info->regs + SPI_K64_REG_PUSHR));
                        }

                        /* Clear interrupt */
                        sys_write32(SPI_K64_SR_TFFF,
                                    (info->regs + SPI_K64_REG_SR));

                } else {
                        /* Nothing more to push */
                        break;
                }

        } while (sys_read32(info->regs + SPI_K64_REG_SR) & SPI_K64_SR_TFFF);

        DBG("pushed: %d\n", cnt);
}
static inline void gpio_dw_unmask_int(uint32_t mask_addr)
{
        sys_write32(sys_read32(mask_addr) & INT_ENABLE_ARC, mask_addr);
}
static int _loapic_init(struct device *unused)
{
        ARG_UNUSED(unused);
        s32_t loApicMaxLvt;     /* local APIC Max LVT */

        /* enable the Local APIC */
        sys_write32(sys_read32(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_SVR) |
                    LOAPIC_ENABLE,
                    CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_SVR);

        loApicMaxLvt = (*(volatile int *)
                        (CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_VER) &
                        LOAPIC_MAXLVT_MASK) >> 16;

        /* reset the DFR, TPR, TIMER_CONFIG, and TIMER_ICR */

        *(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_DFR) =
                (int)0xffffffff;
        *(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_TPR) = (int)0x0;
        *(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_TIMER_CONFIG) =
                (int)0x0;
        *(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_TIMER_ICR) =
                (int)0x0;

        /* program Local Vector Table for the Virtual Wire Mode */

        /* set LINT0: extInt, high-polarity, edge-trigger, not-masked */

        *(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_LINT0) =
                (*(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_LINT0) &
                 ~(LOAPIC_MODE | LOAPIC_LOW | LOAPIC_LEVEL |
                   LOAPIC_LVT_MASKED)) |
                (LOAPIC_EXT | LOAPIC_HIGH | LOAPIC_EDGE);

        /* set LINT1: NMI, high-polarity, edge-trigger, not-masked */

        *(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_LINT1) =
                (*(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_LINT1) &
                 ~(LOAPIC_MODE | LOAPIC_LOW | LOAPIC_LEVEL |
                   LOAPIC_LVT_MASKED)) |
                (LOAPIC_NMI | LOAPIC_HIGH | LOAPIC_EDGE);

        /* lock the Local APIC interrupts */

        *(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_TIMER) =
                LOAPIC_LVT_MASKED;
        *(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_ERROR) =
                LOAPIC_LVT_MASKED;

        if (loApicMaxLvt >= LOAPIC_LVT_P6) {
                *(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_PMC) =
                        LOAPIC_LVT_MASKED;
        }

        if (loApicMaxLvt >= LOAPIC_LVT_PENTIUM4) {
                *(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_THERMAL) =
                        LOAPIC_LVT_MASKED;
        }

#if CONFIG_LOAPIC_SPURIOUS_VECTOR
        *(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_SVR) =
                (*(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_SVR) &
                 0xFFFFFF00) |
                (LOAPIC_SPURIOUS_VECTOR_ID & 0xFF);
#endif

        /* discard a pending interrupt if any */
#if CONFIG_EOI_FORWARDING_BUG
        _lakemont_eoi();
#else
        *(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_EOI) = 0;
#endif

        return 0;
}
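/*
 * Hedged sketch (not from this source): a small helper in the style of the
 * sys_write32() accesses used elsewhere in this section, equivalent to the
 * direct volatile pointer writes above. The helper name is hypothetical.
 */
static inline void loapic_write(u32_t offset, u32_t value)
{
        sys_write32(value, CONFIG_LOAPIC_BASE_ADDRESS + offset);
}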