static int config_dma(void) { u32 i = 0; pr_debug("%s\n", __func__); /* fill descriptor table */ for (i = 0; i < 326; i++) { /* point to next desc table */ dma_desc_table[2 * i] = (unsigned long)&dma_desc_table[2 * i + 2]; } /* last descriptor points to first */ dma_desc_table[2 * 326] = (unsigned long)&dma_desc_table[0]; #ifdef CONFIG_FB_HITACHI_TX09_LANDSCAPE dma_desc_table[0 + 1] = (unsigned long)fb_buffer; for (i = 0; i < 7; i++) { /* blanking lines point to first line of fb_buffer */ dma_desc_table[2 * i + 1] = (unsigned long)fb_buffer + 319 * 2; } for (i = 7; i < 327; i++) { /* visible lines */ dma_desc_table[2 * i + 1] = (unsigned long)fb_buffer + (319 - (i - 7)) * 2; } #else /* portrait mode */ for (i = 0; i < 7; i++) { /* blanking lines point to first line of fb_buffer */ dma_desc_table[2 * i + 1] = (unsigned long)fb_buffer; } for (i = 7; i < 327; i++) { /* visible lines */ dma_desc_table[2 * i + 1] = (unsigned long)fb_buffer + 2 * 240 * (i - 7); } #endif #ifdef CONFIG_FB_HITACHI_TX09_LANDSCAPE set_dma_x_count(CH_PPI, 240); set_dma_x_modify(CH_PPI, 2 * 320); set_dma_y_count(CH_PPI, 0); set_dma_y_modify(CH_PPI, 0); set_dma_next_desc_addr(CH_PPI, (unsigned long)dma_desc_table[2 * 326]); #else set_dma_x_count(CH_PPI, 240); set_dma_x_modify(CH_PPI, 2); set_dma_y_count(CH_PPI, 0); set_dma_y_modify(CH_PPI, 0); set_dma_next_desc_addr(CH_PPI, (unsigned long)dma_desc_table[2 * 326]); #endif set_dma_config(CH_PPI, 0x7404); return 0; }
static int dma_init(void) { int ret; /* Request DMA channel */ ret = request_dma(CH_PPI, DRIVER_NAME); if(ret < 0) { printk(KERN_WARNING DRIVER_NAME ": Could not allocate DMA channel\n"); return ret; } /* Disable channel while it is being configured */ disable_dma(CH_PPI); /* Allocate buffer space for the DMA engine to use */ dma_buffer = __get_dma_pages(GFP_KERNEL, page_alloc_order(BUFFER_SIZE * BUFFER_COUNT)); if(dma_buffer == 0) { printk(KERN_WARNING DRIVER_NAME ": Could not allocate dma_pages\n"); free_dma(CH_PPI); return -ENOMEM; } /* Invalid caching on the DMA buffer */ invalidate_dcache_range(dma_buffer, dma_buffer + (BUFFER_SIZE * BUFFER_COUNT)); /* Set DMA configuration */ set_dma_start_addr(CH_PPI, dma_buffer); set_dma_config(CH_PPI, (DMAFLOW_AUTO | WNR | RESTART | DI_EN | WDSIZE_16 | DMA2D | DI_SEL)); set_dma_x_count(CH_PPI, SAMPLES_PER_BUFFER * CHANNELS); set_dma_x_modify(CH_PPI, SAMPLE_SIZE); set_dma_y_count(CH_PPI, BUFFER_COUNT); set_dma_y_modify(CH_PPI, SAMPLE_SIZE); set_dma_callback(CH_PPI, &buffer_full_handler, NULL); return 0; }
/*
 * config_dma() - configure the PPI DMA channel for the LCD, choosing at
 * run time between descriptor-chained landscape mode and plain 2D
 * autobuffer portrait mode.
 *
 * Returns 0 (always succeeds).
 */
static int config_dma(void)
{
	u32 i;

	if(landscape) {
		/* Descriptor table: even slots link to the next descriptor,
		 * odd slots hold the fb_buffer address for that line. */
		for (i=0;i<U_LINES;i++) {
			//blanking lines point to first line of fb_buffer
			dma_desc_table[2*i] = (unsigned long)&dma_desc_table[2*i+2];
			dma_desc_table[2*i+1] = (unsigned long)fb_buffer;
		}
		for (i=U_LINES;i<U_LINES+LCD_Y_RES;i++) {
			// visible lines: columns walked backwards (rotated scan-out)
			dma_desc_table[2*i] = (unsigned long)&dma_desc_table[2*i+2];
			dma_desc_table[2*i+1] = (unsigned long)fb_buffer + (LCD_Y_RES+U_LINES-1-i)*2;
		}
		//last descriptor points to first, closing the ring
		dma_desc_table[2*(LCD_Y_RES+U_LINES-1)] = (unsigned long)&dma_desc_table[0];

		set_dma_x_count(CH_PPI, LCD_X_RES);
		set_dma_x_modify(CH_PPI, LCD_Y_RES*(LCD_BBP/8));
		set_dma_y_count(CH_PPI, 0);
		set_dma_y_modify(CH_PPI, 0);
		/* NOTE(review): this passes the VALUE stored in
		 * dma_desc_table[0] (i.e. &dma_desc_table[2], the second
		 * descriptor) rather than &dma_desc_table[0] itself — the
		 * similar driver above passes the slot holding the ring head.
		 * Confirm whether skipping the first descriptor is intended. */
		set_dma_next_desc_addr(CH_PPI, (void *)dma_desc_table[0]);
		set_dma_config(CH_PPI, DMAFLOW_LARGE | NDSIZE_4 | WDSIZE_16);
	} else {
		/* Portrait: simple 2D read of the frame buffer, no interrupt. */
		set_dma_config(CH_PPI, set_bfin_dma_config(DIR_READ,
							   DMA_FLOW_AUTO,
							   INTR_DISABLE,
							   DIMENSION_2D,
							   DATA_SIZE_16,
							   DMA_NOSYNC_KEEP_DMA_BUF));
		set_dma_x_count(CH_PPI, LCD_X_RES);
		set_dma_x_modify(CH_PPI,LCD_BBP/8);
		set_dma_y_count(CH_PPI, LCD_Y_RES+U_LINES);
		set_dma_y_modify(CH_PPI, LCD_BBP/8);
		set_dma_start_addr(CH_PPI, ((unsigned long) fb_buffer));
	}
	return 0;
}
static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev) { #ifdef CONFIG_SIR_BFIN_DMA dma_addr_t dma_handle; #endif /* CONFIG_SIR_BFIN_DMA */ if (request_dma(port->rx_dma_channel, "BFIN_UART_RX") < 0) { dev_warn(&dev->dev, "Unable to attach SIR RX DMA channel\n"); return -EBUSY; } if (request_dma(port->tx_dma_channel, "BFIN_UART_TX") < 0) { dev_warn(&dev->dev, "Unable to attach SIR TX DMA channel\n"); free_dma(port->rx_dma_channel); return -EBUSY; } #ifdef CONFIG_SIR_BFIN_DMA set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev); set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev); port->rx_dma_buf.buf = dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA); port->rx_dma_buf.head = 0; port->rx_dma_buf.tail = 0; port->rx_dma_nrows = 0; set_dma_config(port->rx_dma_channel, set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO, INTR_ON_ROW, DIMENSION_2D, DATA_SIZE_8, DMA_SYNC_RESTART)); set_dma_x_count(port->rx_dma_channel, DMA_SIR_RX_XCNT); set_dma_x_modify(port->rx_dma_channel, 1); set_dma_y_count(port->rx_dma_channel, DMA_SIR_RX_YCNT); set_dma_y_modify(port->rx_dma_channel, 1); set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf); enable_dma(port->rx_dma_channel); port->rx_dma_timer.data = (unsigned long)(dev); port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout; #else if (request_irq(port->irq, bfin_sir_rx_int, 0, "BFIN_SIR_RX", dev)) { dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n"); return -EBUSY; } if (request_irq(port->irq+1, bfin_sir_tx_int, 0, "BFIN_SIR_TX", dev)) { dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n"); free_irq(port->irq, dev); return -EBUSY; } #endif return 0; }
/*
 * ppi_set_params() - program the PPI/EPPI peripheral registers and the
 * word size of the companion DMA channel from @params.
 *
 * NOTE(review): this definition is truncated in this chunk — it breaks
 * off inside the trailing "if (dma32) ... else"; the closing lines are
 * not visible here.
 *
 * Returns -EINVAL for an unknown PPI type (visible portion).
 */
static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
{
	const struct ppi_info *info = ppi->info;
	int dma32 = 0;
	int dma_config, bytes_per_line, lines_per_frame;

	/* Frame geometry in bytes per line and lines per frame. */
	bytes_per_line = params->width * params->bpp / 8;
	lines_per_frame = params->height;

	/* An all-ones interrupt mask means no error interrupt is wanted. */
	if (params->int_mask == 0xFFFFFFFF)
		ppi->err_int = false;
	else
		ppi->err_int = true;

	/* 2D stop-mode memory-write DMA (WNR) with interrupt (DI_EN). */
	dma_config = (DMA_FLOW_STOP | WNR | RESTART | DMA2D | DI_EN);
	/* Cache the control word with the port disabled; presumably a
	 * separate start routine sets PORT_EN later — confirm against the
	 * rest of the driver. */
	ppi->ppi_control = params->ppi_control & ~PORT_EN;

	/* NOTE(review): the "®->" sequences below look like mojibake for
	 * "&reg->" (an "&reg;" entity collapsed to '®'); code left
	 * byte-identical here — confirm against upstream source. */
	switch (info->type) {
	case PPI_TYPE_PPI:
	{
		struct bfin_ppi_regs *reg = info->base;

		/* 32-bit DMA only when explicitly requested. */
		if (params->ppi_control & DMA32)
			dma32 = 1;

		bfin_write16(®->control, ppi->ppi_control);
		bfin_write16(®->count, bytes_per_line - 1);
		bfin_write16(®->frame, lines_per_frame);
		break;
	}
	case PPI_TYPE_EPPI:
	{
		struct bfin_eppi_regs *reg = info->base;

		/* Packed data, or data lanes wider than 16 bits, need
		 * 32-bit DMA. */
		if ((params->ppi_control & PACK_EN)
			|| (params->ppi_control & 0x38000) > DLEN_16)
			dma32 = 1;

		bfin_write32(®->control, ppi->ppi_control);
		bfin_write16(®->line, bytes_per_line + params->blank_clocks);
		bfin_write16(®->frame, lines_per_frame);
		bfin_write16(®->hdelay, 0);
		bfin_write16(®->vdelay, 0);
		bfin_write16(®->hcount, bytes_per_line);
		bfin_write16(®->vcount, lines_per_frame);
		break;
	}
	default:
		return -EINVAL;
	}

	if (dma32) {
		/* 32-bit transfers: counts and strides in 4-byte words. */
		dma_config |= WDSIZE_32;
		set_dma_x_count(info->dma_ch, bytes_per_line >> 2);
		set_dma_x_modify(info->dma_ch, 4);
		set_dma_y_modify(info->dma_ch, 4);
	} else {
/*
 * ppi_set_params() - newer variant: programs PPI/EPPI/EPPI3 registers,
 * converting pixel-based parameters into bus samples first, and chooses
 * the DMA direction from PORT_DIR.
 *
 * NOTE(review): this definition is truncated in this chunk — it breaks
 * off inside the trailing "if (dma32) ... else"; the closing lines are
 * not visible here.
 *
 * Returns -EINVAL for an unknown PPI type (visible portion).
 */
static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
{
	const struct ppi_info *info = ppi->info;
	int dma32 = 0;
	int dma_config, bytes_per_line;
	int hcount, hdelay, samples_per_line;

	bytes_per_line = params->width * params->bpp / 8;
	/* convert parameters unit from pixels to samples */
	hcount = params->width * params->bpp / params->dlen;
	hdelay = params->hdelay * params->bpp / params->dlen;
	samples_per_line = params->line * params->bpp / params->dlen;

	/* An all-ones interrupt mask means no error interrupt is wanted. */
	if (params->int_mask == 0xFFFFFFFF)
		ppi->err_int = false;
	else
		ppi->err_int = true;

	/* 2D stop-mode DMA, interrupt on outer-loop (Y) completion. */
	dma_config = (DMA_FLOW_STOP | RESTART | DMA2D | DI_EN_Y);
	ppi->ppi_control = params->ppi_control & ~PORT_EN;
	/* Receive direction needs the memory-write (WNR) bit. */
	if (!(ppi->ppi_control & PORT_DIR))
		dma_config |= WNR;

	/* NOTE(review): the "®->" sequences below look like mojibake for
	 * "&reg->"; code left byte-identical here — confirm against
	 * upstream source. */
	switch (info->type) {
	case PPI_TYPE_PPI:
	{
		struct bfin_ppi_regs *reg = info->base;

		/* 32-bit DMA only when explicitly requested. */
		if (params->ppi_control & DMA32)
			dma32 = 1;

		bfin_write16(®->control, ppi->ppi_control);
		bfin_write16(®->count, samples_per_line - 1);
		bfin_write16(®->frame, params->frame);
		break;
	}
	case PPI_TYPE_EPPI:
	{
		struct bfin_eppi_regs *reg = info->base;

		/* Packed data, or data lanes wider than 16 bits, need
		 * 32-bit DMA. */
		if ((params->ppi_control & PACK_EN)
			|| (params->ppi_control & 0x38000) > DLEN_16)
			dma32 = 1;

		bfin_write32(®->control, ppi->ppi_control);
		bfin_write16(®->line, samples_per_line);
		bfin_write16(®->frame, params->frame);
		bfin_write16(®->hdelay, hdelay);
		bfin_write16(®->vdelay, params->vdelay);
		bfin_write16(®->hcount, hcount);
		bfin_write16(®->vcount, params->height);
		break;
	}
	case PPI_TYPE_EPPI3:
	{
		struct bfin_eppi3_regs *reg = info->base;

		/* EPPI3 uses a wider (0x70000) data-length field. */
		if ((params->ppi_control & PACK_EN)
			|| (params->ppi_control & 0x70000) > DLEN_16)
			dma32 = 1;

		bfin_write32(®->ctl, ppi->ppi_control);
		bfin_write32(®->line, samples_per_line);
		bfin_write32(®->frame, params->frame);
		bfin_write32(®->hdly, hdelay);
		bfin_write32(®->vdly, params->vdelay);
		bfin_write32(®->hcnt, hcount);
		bfin_write32(®->vcnt, params->height);
		/* Only the low byte of the mask is a valid EPPI3 imsk value. */
		if (params->int_mask)
			bfin_write32(®->imsk, params->int_mask & 0xFF);
		break;
	}
	default:
		return -EINVAL;
	}

	if (dma32) {
		/* 32-bit transfers: counts and strides in 4-byte words. */
		dma_config |= WDSIZE_32 | PSIZE_32;
		set_dma_x_count(info->dma_ch, bytes_per_line >> 2);
		set_dma_x_modify(info->dma_ch, 4);
		set_dma_y_modify(info->dma_ch, 4);
	} else {
/*
 * FUNCTION NAME: ppi_read
 *
 * INPUTS/OUTPUTS:
 * in_filp - Description of opened file.
 * in_count - how many bytes user wants to get.
 * out_buf - data will be written to this address.
 *
 * RETURN
 * positive number: bytes read back
 * -EINVAL When word size is set to 16, reading odd bytes.
 * -EAGAIN When reading mode is set to non block and there is no rx data.
 *
 * FUNCTION(S) CALLED:
 *
 * GLOBAL VARIABLES REFERENCED: ppiinfo
 *
 * GLOBAL VARIABLES MODIFIED: NIL
 *
 * DESCRIPTION: It is invoked when user calls the 'read' system call
 * to read from the device: it programs the PPI DMA channel to
 * write one frame directly into the user buffer, enables the PPI,
 * optionally pulses a trigger GPIO, and (in blocking mode) sleeps
 * until the frame-complete handler sets pdev->done.
 *
 * CAUTION: the buffer is DMA'd into directly; its cache lines are
 * invalidated first.
 */
static ssize_t
ppi_read(struct file *filp, char *buf, size_t count, loff_t * pos)
{
	int ierr;
	ppi_device_t *pdev = filp->private_data;

	pr_debug("ppi_read:\n");

	/* count is size_t (unsigned), so this only catches count == 0. */
	if (count <= 0)
		return 0;

	/* Cleared here, set by the DMA completion handler. */
	pdev->done = 0;

	/* Invalidate allocated memory in Data Cache */
	blackfin_dcache_invalidate_range((u_long) buf, (u_long) (buf + count));

	pr_debug("ppi_read: blackfin_dcache_invalidate_range : DONE\n");

	/* configure ppi port for DMA RX */
	set_dma_config(CH_PPI, pdev->dma_config);
	set_dma_start_addr(CH_PPI, (u_long) buf);
	set_dma_x_count(CH_PPI, pdev->pixel_per_line / 2);	// Div 2 because of 16-bit packing
	set_dma_y_count(CH_PPI, pdev->lines_per_frame);
	set_dma_y_modify(CH_PPI, 2);
	/* Stride in bytes: 2 for 16-bit transfers, 1 for 8-bit. */
	if (pdev->bpp > 8 || pdev->dma_config & WDSIZE_16)
		set_dma_x_modify(CH_PPI, 2);
	else
		set_dma_x_modify(CH_PPI, 1);

	pr_debug("ppi_read: SETUP DMA : DONE\n");

	enable_dma(CH_PPI);

	/* Enable PPI */
	bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | PORT_EN);
	SSYNC();

	/* Optional external trigger pulse to start the frame source. */
	if (pdev->ppi_trigger_gpio > NO_TRIGGER) {
		gpio_set_value(pdev->ppi_trigger_gpio, 1);
		udelay(1);
		gpio_set_value(pdev->ppi_trigger_gpio, 0);
	}

	pr_debug("ppi_read: PPI ENABLED : DONE\n");

	/* Wait for data available.
	 * NOTE(review): this condition is always true, and in non-blocking
	 * mode the function returns -EAGAIN with the DMA and PPI still
	 * enabled — looks like a leftover simplification; confirm intent. */
	if (1) {
		if (pdev->nonblock)
			return -EAGAIN;
		else {
			pr_debug("PPI wait_event_interruptible\n");
			ierr = wait_event_interruptible(*(pdev->rx_avail),
							pdev->done);
			if (ierr) {
				/* waiting is broken by a signal */
				pr_debug("PPI wait_event_interruptible ierr\n");
				return ierr;
			}
		}
	}

	pr_debug("PPI wait_event_interruptible done\n");

	disable_dma(CH_PPI);

	pr_debug("ppi_read: return\n");

	return count;
}
/*
 * bfin_serial_startup() - uart_ops .startup hook: bring up the RX path
 * either via a 2D autobuffer DMA ring with a flush timer
 * (CONFIG_SERIAL_BFIN_DMA) or via RX/TX interrupts, then enable the
 * receive interrupt (ERBFI).
 *
 * Returns 0 on success, -EBUSY if a DMA channel or IRQ cannot be
 * attached.
 */
static int bfin_serial_startup(struct uart_port *port)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;

#ifdef CONFIG_SERIAL_BFIN_DMA
	dma_addr_t dma_handle;

	if (request_dma(uart->rx_dma_channel, "BFIN_UART_RX") < 0) {
		printk(KERN_NOTICE "Unable to attach Blackfin UART RX DMA channel\n");
		return -EBUSY;
	}

	if (request_dma(uart->tx_dma_channel, "BFIN_UART_TX") < 0) {
		printk(KERN_NOTICE "Unable to attach Blackfin UART TX DMA channel\n");
		free_dma(uart->rx_dma_channel);
		return -EBUSY;
	}

	set_dma_callback(uart->rx_dma_channel, bfin_serial_dma_rx_int, uart);
	set_dma_callback(uart->tx_dma_channel, bfin_serial_dma_tx_int, uart);

	/* NOTE(review): allocation result is not checked before use. */
	uart->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL,
					PAGE_SIZE, &dma_handle, GFP_DMA);
	uart->rx_dma_buf.head = 0;
	uart->rx_dma_buf.tail = 0;
	uart->rx_dma_nrows = 0;

	/* 2D write DMA, 8-bit data, interrupt per completed row. */
	set_dma_config(uart->rx_dma_channel,
		set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
			INTR_ON_ROW, DIMENSION_2D,
			DATA_SIZE_8,
			DMA_SYNC_RESTART));
	set_dma_x_count(uart->rx_dma_channel, DMA_RX_XCOUNT);
	set_dma_x_modify(uart->rx_dma_channel, 1);
	set_dma_y_count(uart->rx_dma_channel, DMA_RX_YCOUNT);
	set_dma_y_modify(uart->rx_dma_channel, 1);
	set_dma_start_addr(uart->rx_dma_channel,
			   (unsigned long)uart->rx_dma_buf.buf);
	enable_dma(uart->rx_dma_channel);

	/* Periodic timer flushes partially-filled DMA rows to the tty. */
	uart->rx_dma_timer.data = (unsigned long)(uart);
	uart->rx_dma_timer.function = (void *)bfin_serial_rx_dma_timeout;
	uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES;
	add_timer(&(uart->rx_dma_timer));
#else
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	/* When kgdb owns this port, hand the IRQs over instead of
	 * requesting them here. */
	if (kgdboc_port_line == uart->port.line && kgdboc_break_enabled)
		kgdboc_break_enabled = 0;
	else {
# endif
	if (request_irq(uart->port.irq, bfin_serial_rx_int, IRQF_DISABLED,
			"BFIN_UART_RX", uart)) {
		printk(KERN_NOTICE "Unable to attach BlackFin UART RX interrupt\n");
		return -EBUSY;
	}

	/* NOTE(review): the string literal below contains a raw line
	 * break (likely an extraction artifact) — as written this does
	 * not compile; confirm against the upstream one-line message. */
	if (request_irq
		(uart->port.irq+1, bfin_serial_tx_int, IRQF_DISABLED,
		"BFIN_UART_TX", uart)) {
		printk(KERN_NOTICE "Unable to attach 
BlackFin UART TX interrupt\n");
		free_irq(uart->port.irq, uart);
		return -EBUSY;
	}

# ifdef CONFIG_BF54x
	{
		unsigned uart_dma_ch_rx, uart_dma_ch_tx;

		switch (uart->port.irq) {
		case IRQ_UART3_RX:
			uart_dma_ch_rx = CH_UART3_RX;
			uart_dma_ch_tx = CH_UART3_TX;
			break;
		case IRQ_UART2_RX:
			uart_dma_ch_rx = CH_UART2_RX;
			uart_dma_ch_tx = CH_UART2_TX;
			break;
		default:
			uart_dma_ch_rx = uart_dma_ch_tx = 0;
			break;
		};

		/* The shared DMA channels must be held even in PIO mode. */
		if (uart_dma_ch_rx &&
			request_dma(uart_dma_ch_rx, "BFIN_UART_RX") < 0) {
			printk(KERN_NOTICE"Fail to attach UART interrupt\n");
			free_irq(uart->port.irq, uart);
			free_irq(uart->port.irq + 1, uart);
			return -EBUSY;
		}
		if (uart_dma_ch_tx &&
			request_dma(uart_dma_ch_tx, "BFIN_UART_TX") < 0) {
			printk(KERN_NOTICE "Fail to attach UART interrupt\n");
			free_dma(uart_dma_ch_rx);
			free_irq(uart->port.irq, uart);
			free_irq(uart->port.irq + 1, uart);
			return -EBUSY;
		}
	}
# endif
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	}
# endif
#endif
	/* Enable the receive-buffer-full interrupt. */
	UART_SET_IER(uart, ERBFI);
	return 0;
}
/*
 * bfin_serial_startup() - uart_ops .startup hook (CTS/RTS-aware variant):
 * bring up the RX path via DMA (CONFIG_SERIAL_BFIN_DMA) or via RX/TX
 * interrupts, wire up GPIO or hardware flow-control interrupts when
 * configured, then enable the receive interrupt (ERBFI).
 *
 * Returns 0 on success, -EBUSY if a DMA channel or IRQ cannot be
 * attached.
 */
static int bfin_serial_startup(struct uart_port *port)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;

#ifdef CONFIG_SERIAL_BFIN_DMA
	dma_addr_t dma_handle;

	if (request_dma(uart->rx_dma_channel, "BFIN_UART_RX") < 0) {
		printk(KERN_NOTICE "Unable to attach Blackfin UART RX DMA channel\n");
		return -EBUSY;
	}

	if (request_dma(uart->tx_dma_channel, "BFIN_UART_TX") < 0) {
		printk(KERN_NOTICE "Unable to attach Blackfin UART TX DMA channel\n");
		free_dma(uart->rx_dma_channel);
		return -EBUSY;
	}

	set_dma_callback(uart->rx_dma_channel, bfin_serial_dma_rx_int, uart);
	set_dma_callback(uart->tx_dma_channel, bfin_serial_dma_tx_int, uart);

	/* NOTE(review): allocation result is not checked before use. */
	uart->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL,
					PAGE_SIZE, &dma_handle, GFP_DMA);
	uart->rx_dma_buf.head = 0;
	uart->rx_dma_buf.tail = 0;
	uart->rx_dma_nrows = 0;

	/* 2D write DMA, 8-bit data, interrupt per completed row. */
	set_dma_config(uart->rx_dma_channel,
		set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
			INTR_ON_ROW, DIMENSION_2D,
			DATA_SIZE_8,
			DMA_SYNC_RESTART));
	set_dma_x_count(uart->rx_dma_channel, DMA_RX_XCOUNT);
	set_dma_x_modify(uart->rx_dma_channel, 1);
	set_dma_y_count(uart->rx_dma_channel, DMA_RX_YCOUNT);
	set_dma_y_modify(uart->rx_dma_channel, 1);
	set_dma_start_addr(uart->rx_dma_channel,
			   (unsigned long)uart->rx_dma_buf.buf);
	enable_dma(uart->rx_dma_channel);

	/* Periodic timer flushes partially-filled DMA rows to the tty. */
	uart->rx_dma_timer.data = (unsigned long)(uart);
	uart->rx_dma_timer.function = (void *)bfin_serial_rx_dma_timeout;
	uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES;
	add_timer(&(uart->rx_dma_timer));
#else
# if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	/* When kgdb owns this port, hand the IRQs over instead of
	 * requesting them here. */
	if (kgdboc_port_line == uart->port.line && kgdboc_break_enabled)
		kgdboc_break_enabled = 0;
	else {
# endif
	if (request_irq(uart->port.irq, bfin_serial_rx_int, IRQF_DISABLED,
			"BFIN_UART_RX", uart)) {
		printk(KERN_NOTICE "Unable to attach BlackFin UART RX interrupt\n");
		return -EBUSY;
	}

	/* NOTE(review): the string literal below contains a raw line
	 * break (likely an extraction artifact) — as written this does
	 * not compile; confirm against the upstream one-line message. */
	if (request_irq
		(uart->port.irq+1, bfin_serial_tx_int, IRQF_DISABLED,
		"BFIN_UART_TX", uart)) {
		printk(KERN_NOTICE "Unable to attach 
BlackFin UART TX interrupt\n");
		free_irq(uart->port.irq, uart);
		return -EBUSY;
	}

# ifdef CONFIG_BF54x
	{
		/*
		 * UART2 and UART3 on BF548 share interrupt PINs and DMA
		 * controllers with SPORT2 and SPORT3. UART rx and tx
		 * interrupts are generated in PIO mode only when configure
		 * their peripheral mapping registers properly, which means
		 * request corresponding DMA channels in PIO mode as well.
		 */
		unsigned uart_dma_ch_rx, uart_dma_ch_tx;

		switch (uart->port.irq) {
		case IRQ_UART3_RX:
			uart_dma_ch_rx = CH_UART3_RX;
			uart_dma_ch_tx = CH_UART3_TX;
			break;
		case IRQ_UART2_RX:
			uart_dma_ch_rx = CH_UART2_RX;
			uart_dma_ch_tx = CH_UART2_TX;
			break;
		default:
			uart_dma_ch_rx = uart_dma_ch_tx = 0;
			break;
		};

		if (uart_dma_ch_rx &&
			request_dma(uart_dma_ch_rx, "BFIN_UART_RX") < 0) {
			printk(KERN_NOTICE"Fail to attach UART interrupt\n");
			free_irq(uart->port.irq, uart);
			free_irq(uart->port.irq + 1, uart);
			return -EBUSY;
		}
		if (uart_dma_ch_tx &&
			request_dma(uart_dma_ch_tx, "BFIN_UART_TX") < 0) {
			printk(KERN_NOTICE "Fail to attach UART interrupt\n");
			free_dma(uart_dma_ch_rx);
			free_irq(uart->port.irq, uart);
			free_irq(uart->port.irq + 1, uart);
			return -EBUSY;
		}
	}
# endif
# if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	}
# endif
#endif

#ifdef CONFIG_SERIAL_BFIN_CTSRTS
	/* GPIO-based flow control: CTS edge interrupt, RTS driven low. */
	if (uart->cts_pin >= 0) {
		if (request_irq(gpio_to_irq(uart->cts_pin),
			bfin_serial_mctrl_cts_int,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
			IRQF_DISABLED, "BFIN_UART_CTS", uart)) {
			uart->cts_pin = -1;
			pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
		}
	}
	if (uart->rts_pin >= 0) {
		gpio_direction_output(uart->rts_pin, 0);
	}
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
	/* Hardware flow control via the UART's modem-status interrupt. */
	if (uart->cts_pin >= 0 && request_irq(uart->status_irq,
		bfin_serial_mctrl_cts_int,
		IRQF_DISABLED,
		"BFIN_UART_MODEM_STATUS", uart)) {
		uart->cts_pin = -1;
		pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
	}

	/* CTS RTS PINs are negative assertive. */
	UART_PUT_MCR(uart, ACTS);
	UART_SET_IER(uart, EDSSI);
#endif

	/* Enable the receive-buffer-full interrupt. */
	UART_SET_IER(uart, ERBFI);
	return 0;
}