/*
 * Initialises the CMOS RTC driver: reads the current clock values, enables
 * the RTC's periodic interrupt at 2 Hz, and installs the IRQ 8 handler.
 * Returns BUS_NO_INIT_DATA (no per-device driver data is kept).
 */
static void* rtc_init(device_t *dev) {
	// Read initial RTC values
	rtc_read();

	// Enable the periodic IRQ
	IRQ_OFF();
	// Select status register B (index 0x0B; the 0x80 bit also masks NMI
	// while we poke the CMOS, per the usual RTC access convention)
	io_outb(CMOS_REG_PORT, 0x8B);
	uint8_t reg = io_inb(CMOS_DATA_PORT);
	// Reading resets the selected index, so re-select before writing back
	io_outb(CMOS_REG_PORT, 0x8B);
	// Bit 6 of register B = periodic interrupt enable
	io_outb(CMOS_DATA_PORT, reg | 0x40);

	// Rate is 2 Hz: register A's low nibble is the rate divider; rate 0x0F
	// gives 32768 >> (15 - 1) = 2 Hz
	io_outb(CMOS_REG_PORT, 0x8A);
	reg = io_inb(CMOS_DATA_PORT);
	io_outb(CMOS_REG_PORT, 0x8A);
	io_outb(CMOS_DATA_PORT, (reg & 0xF0) | 0x0F);

	// Install IRQ handler (the RTC raises IRQ 8)
	hal_register_irq_handler(8, rtc_sys_tick, NULL);

	// Re-enable IRQs
	IRQ_RES();

	return BUS_NO_INIT_DATA;
}
/* Duty cycle is expressed from 0% to 100% */ void task_set_ttl(int pid, int duty_cycle_or_preload) { IRQ_OFF(); task_t * task = (task_t*)list_get(tasklist, pid)->value; if(task) task_set_ttl(task, duty_cycle_or_preload); IRQ_RES(); }
void task_set_ttl_fscale(int pid, int fscale) { IRQ_OFF(); task_t * task = (task_t*)list_get(tasklist, pid)->value; if(task) task_set_ttl_fscale(task->pid, fscale); IRQ_RES(); }
/*
 * Brings the tasking subsystem online: hooks the PIT switcher, wraps the
 * already-running kernel thread as the root task, creates the task list and
 * tree, then spawns two demo tasks (task1 in pulse mode, task2 in PWM mode).
 */
void tasking_install(void) {
	IRQ_OFF();
	/* Install the pit callback, which is acting as a callback service: */
	MOD_IOCTL("pit_driver", 1, (uintptr_t)"pit_switch_task", (uintptr_t)pit_switch_task);
	/* Initialize the very first task, which is the main thread that was already
	 * running (entry == 0 marks the bootstrap task; see task_create): */
	current_task = main_task = task_create((char*)"rootproc", 0, Kernel::CPU::read_reg(Kernel::CPU::eflags), (uint32_t)Kernel::Memory::Man::curr_dir->table_entries);
	/* Initialize task list and tree: */
	tasklist = list_create();
	tasktree = tree_create();
	tree_set_root(tasktree, main_task);
	list_insert(tasklist, main_task);
	tasking_enable(1); /* Allow tasking to work */
	is_tasking_initialized = 1;
	IRQ_RES(); /* Kickstart tasking */

	/* Test tasking: */
	/* NOTE(review): task_create_and_run is defined elsewhere in this file with
	 * a 5th parameter (parent_task) — these calls pass only 4 arguments.
	 * Confirm a 4-argument overload exists or a parent must be supplied. */
	task_t * t1 = task_create_and_run((char*)"task1", task1, current_task->regs->eflags, current_task->regs->cr3);
	task_t * t2 = task_create_and_run((char*)"task2", task2, current_task->regs->eflags, current_task->regs->cr3);
	/* Mode 0 = pulse, mode 1 = PWM (matches the labels task1/task2 print) */
	task_set_ttl_mode(t1->pid, 0);
	task_set_ttl_mode(t2->pid, 1);
	task_set_ttl_fscale(t1, 1000);
	task_set_ttl(t1, 80);
	task_set_ttl(t2, 10);
}
void task_set_ttl_mode(int pid, char pwm_or_pulse_mode) { IRQ_OFF(); task_t * task = (task_t*)list_get(tasklist, pid)->value; if(task) task_set_ttl_mode(task, pwm_or_pulse_mode); IRQ_RES(); }
/*
 * Sets the task's TTL full-scale value, clamping out-of-range input
 * (non-positive or >= MAX_TTL) to MAX_TTL, then resets the duty cycle.
 */
void task_set_ttl_fscale(task_t * task, int fscale) {
	IRQ_OFF();
	if(task) {
		if(fscale <= 0 || fscale >= MAX_TTL)
			task->ttl_fscale = MAX_TTL;
		else
			task->ttl_fscale = fscale;
		task_set_ttl(task, 100); /* Set default duty cycle to 100% */
	}
	IRQ_RES();
}
static void task1(void) { for(;;) { IRQ_OFF(); Point p = Kernel::term.go_to(50, 0); Kernel::term.printf("TASK1 (pulse) %d ", ctr1++); Kernel::term.go_to(p.X, p.Y); IRQ_RES(); } }
static void task2(void) { for(;;) { IRQ_OFF(); Point p = Kernel::term.go_to(50, 1); Kernel::term.printf("TASK2 (pwm) %d ", ctr2++); Kernel::term.go_to(p.X, p.Y); IRQ_RES(); } }
static void au1xmmc_send_pio(struct au1xmmc_host *host) { struct mmc_data *data = 0; int sg_len, max, count = 0; unsigned char *sg_ptr; u32 status = 0; struct scatterlist *sg; data = host->mrq->data; if (!(host->flags & HOST_F_XMIT)) return; /* This is the pointer to the data buffer */ sg = &data->sg[host->pio.index]; sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset; /* This is the space left inside the buffer */ sg_len = data->sg[host->pio.index].length - host->pio.offset; /* Check to if we need less then the size of the sg_buffer */ max = (sg_len > host->pio.len) ? host->pio.len : sg_len; if (max > AU1XMMC_MAX_TRANSFER) max = AU1XMMC_MAX_TRANSFER; for(count = 0; count < max; count++ ) { unsigned char val; status = au_readl(HOST_STATUS(host)); if (!(status & SD_STATUS_TH)) break; val = *sg_ptr++; au_writel((unsigned long) val, HOST_TXPORT(host)); au_sync(); } host->pio.len -= count; host->pio.offset += count; if (count == sg_len) { host->pio.index++; host->pio.offset = 0; } if (host->pio.len == 0) { IRQ_OFF(host, SD_CONFIG_TH); if (host->flags & HOST_F_STOP) SEND_STOP(host); tasklet_schedule(&host->data_task); } }
/*
 * Transmit-direction PIO: copies bytes from the current scatterlist entry
 * into the SD controller's TX FIFO until the FIFO fills, the entry is
 * exhausted, or the per-call transfer cap is reached.
 */
static void au1xmmc_send_pio(struct au1xmmc_host *host)
{
	struct mmc_data *data;
	int sg_len, max, count;
	unsigned char *sg_ptr, val;
	u32 status;
	struct scatterlist *sg;

	data = host->mrq->data;

	if (!(host->flags & HOST_F_XMIT))
		return;

	/* Pointer to the current position in the active scatterlist entry */
	sg = &data->sg[host->pio.index];
	sg_ptr = sg_virt(sg) + host->pio.offset;

	/* Space left inside this scatterlist entry */
	sg_len = data->sg[host->pio.index].length - host->pio.offset;

	/* Limit the transfer to the smaller of what the entry holds, what the
	 * request still needs, and the hardware's per-call maximum */
	max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
	if (max > AU1XMMC_MAX_TRANSFER)
		max = AU1XMMC_MAX_TRANSFER;

	for (count = 0; count < max; count++) {
		status = au_readl(HOST_STATUS(host));

		/* Stop as soon as the TX FIFO reports no room */
		if (!(status & SD_STATUS_TH))
			break;

		val = *sg_ptr++;

		au_writel((unsigned long)val, HOST_TXPORT(host));
		au_sync();
	}

	host->pio.len -= count;
	host->pio.offset += count;

	/* Entry fully consumed: advance to the next scatterlist entry */
	if (count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}

	/* Whole request sent: mask the TX IRQ and hand off to the data tasklet */
	if (host->pio.len == 0) {
		IRQ_OFF(host, SD_CONFIG_TH);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
	}
}
/*
 * Creates a task and immediately inserts it into the scheduler structures,
 * making it eligible for switching. Returns the new task.
 */
task_t * task_create_and_run(char * task_name, task_t * parent_task, void (*entry)(void), uint32_t eflags, uint32_t pagedir) {
	task_t * new_task = task_create(task_name, entry, eflags, pagedir);

	/* task_create() restored IRQs on its way out; mask them again while the
	 * task is linked in, so the switcher never sees a half-inserted task. */
	IRQ_OFF();
	task_run(parent_task, new_task);
	IRQ_RES();

	return new_task;
}
/* Duty cycle is expressed from 0% to 100% */
void task_set_ttl(task_t * task, int duty_cycle_or_preload) {
	IRQ_OFF();
	if(task) {
		/* Scale the percentage against the task's full-scale value */
		int scaled = (duty_cycle_or_preload * task->ttl_fscale) / 100;

		if(task->ttl_pwm_mode) {
			/* PWM mode: the counter restarts at full-scale minus the on-time */
			task->ttl_start = task->ttl_fscale - scaled;
			task->ttl = 0;
		} else {
			/* Pulse mode: the scaled value is used as a plain preload */
			task->ttl = scaled;
			task->ttl_start = scaled;
		}
	}
	IRQ_RES();
}
/*
 * Selects the task's TTL mode: non-zero = PWM (count up from zero),
 * zero = pulse (count down from full scale). Resets the counters.
 */
void task_set_ttl_mode(task_t * task, char pwm_or_pulse_mode) {
	IRQ_OFF();
	if(task) {
		int initial;

		if(pwm_or_pulse_mode) {
			task->ttl_pwm_mode = 1;
			initial = 0;
		} else {
			task->ttl_pwm_mode = 0;
			initial = task->ttl_fscale;
		}

		task->ttl = initial;
		task->ttl_start = initial;
	}
	IRQ_RES();
}
/****************************** Task creation ******************************/
/*
 * Allocates and initialises a new task_t. When `entry` is non-null a fresh
 * kernel stack is allocated and an x86 interrupt-return frame is forged on
 * it so the first context switch "returns" into `entry`; a returning task
 * lands in task_return_grave. When `entry` is null and tasking is not yet
 * initialised, this wraps the already-running bootstrap thread.
 * IRQs are masked for the whole body. Returns the new task.
 */
task_t * task_create(char * task_name, void (*entry)(void), uint32_t eflags, uint32_t pagedir) {
	IRQ_OFF();
	task_t * task = new task_t;
	task->regs = new regs_t;
	task->syscall_regs = new regs_t;
	task->regs->eflags = eflags;
	task->regs->cr3 = pagedir;            /* page directory physical address */
	task->state = TASKST_CRADLE;          /* newborn; not yet scheduled */
	task->name = task_name;               /* NOTE(review): stores the caller's pointer, no copy */
	task->regs->eax = task->regs->ebx = task->regs->ecx = task->regs->edx = task->regs->esi = task->regs->edi = 0;
	task->next = 0;
	/* Default TTL configuration: PWM mode, full-scale MAX_TTL, counters zeroed */
	task->ttl = task->ttl_start = 0;
	task->ttl_fscale = MAX_TTL;
	task->ttl_pwm_mode = 1;
	if(entry) {
		task->regs->eip = (uint32_t) entry;
		/* Stack grows down: esp starts at the top of the fresh allocation */
		task->regs->esp = (uint32_t) malloc(TASK_STACK_SIZE) + TASK_STACK_SIZE;
		/* Set next pid: */
		task->pid = ++next_pid;
		/* Prepare X86 stack: */
		uint32_t * stack_ptr = (uint32_t*)(task->regs->esp);
		/* Parse it and configure it: */
		/* NOTE(review): `stack` is placed AT the stack top, so these member
		 * writes land above the malloc'd region — verify x86_stack_t is meant
		 * to sit at/above esp and that the memory there is valid. */
		Kernel::CPU::x86_stack_t * stack = (Kernel::CPU::x86_stack_t*)stack_ptr;
		/* Magic offsets +28/+32 mirror the switcher's frame layout — TODO confirm
		 * against the pit_switch_task assembly */
		stack->regs2.ebp = (uint32_t)(stack_ptr + 28);
		stack->old_ebp = (uint32_t)(stack_ptr + 32);
		stack->old_addr = (unsigned)task_return_grave;
		stack->ds = X86_SEGMENT_USER_DATA;
		stack->cs = X86_SEGMENT_USER_CODE;
		stack->eip = task->regs->eip;
		stack->eflags.interrupt = 1;      /* IF set: task runs with IRQs enabled */
		stack->eflags.iopl = 3;
		stack->esp = task->regs->esp;
		stack->ss = X86_SEGMENT_USER_DATA;
		stack->regs2.eax = (uint32_t)task_return_grave; /* Return address of a task */
	} else {
		if(!is_tasking_initialized) {
			/* If entry is null, then we're allocating the very first process, which is the main core task */
			task->pid = next_pid;
		}
		/* else we ignore it, we don't want to run a normal task with an entry point of address 0! */
	}
	IRQ_RES();
	return task;
}
void task_kill(int pid) { if(!pid) return; /* Do not let the main task kill itself */ if(!irq_already_off) IRQ_OFF(); kprintf("\nKILLED PID: %d\n", pid); /* Remove the task from the list, then cleanup the rest (remove process from tree, deallocate task's stack and more) */ task_t * task_to_kill = (task_t*)list_get(tasklist, pid)->value; /* Store it first so we can clean up everything */ list_remove(tasklist, pid); tasklist_size--; /* Remove from tree: */ /* Cleanup everything else: */ task_free(task_to_kill); irq_already_off = 0; IRQ_RES(); /* Resume switching */ }
/*
 * Initialises the ATA driver, based on the base addresses from the PCI
 * controller. Probes both channels for up to four ATA/ATAPI devices,
 * reads each device's IDENTIFY block, records geometry and supported
 * transfer modes, and registers each device with the HAL disk layer.
 * Returns the driver handle, or NULL if allocation failed.
 */
ata_driver_t* ata_init_pci(uint32_t BAR0, uint32_t BAR1, uint32_t BAR2, uint32_t BAR3, uint32_t BAR4) {
	int k, count = 0; /* (unused locals i, j removed) */
	int channel, device;

	// Set up memory for the driver
	ata_driver_t* driver = (ata_driver_t *) kmalloc(sizeof(ata_driver_t));

	// Initialise memory if needed
	if(driver) {
		memclr(driver, sizeof(ata_driver_t));
	} else {
		return NULL;
	}

	IRQ_OFF();

	// Allocate some buffer memory for the IDENTIFY data
	uint8_t *ide_buf = (uint8_t *) kmalloc(2048);
	memclr(ide_buf, 2048);

	// Copy the BAR addresses
	driver->BAR0 = BAR0;
	driver->BAR1 = BAR1;
	driver->BAR2 = BAR2;
	driver->BAR3 = BAR3;
	driver->BAR4 = BAR4;

	// Set up IO ports to control the IDE controller with; a zero BAR means
	// the channel uses the legacy ISA ports (0x1F0/0x3F6, 0x170/0x376)
	driver->channels[ATA_PRIMARY].base = (BAR0 & 0xFFFFFFFC) + 0x1F0 * (!BAR0);
	driver->channels[ATA_PRIMARY].ctrl = (BAR1 & 0xFFFFFFFC) + 0x3F6 * (!BAR1);
	driver->channels[ATA_PRIMARY].bmide = (BAR4 & 0xFFFFFFFC) + 0; // Bus Master IDE
	driver->channels[ATA_SECONDARY].base = (BAR2 & 0xFFFFFFFC) + 0x170 * (!BAR2);
	driver->channels[ATA_SECONDARY].ctrl = (BAR3 & 0xFFFFFFFC) + 0x376 * (!BAR3);
	driver->channels[ATA_SECONDARY].bmide = (BAR4 & 0xFFFFFFFC) + 8; // Bus Master IDE

	// Disable IRQs for both channels (nIEN bit in the device control register)
	driver->channels[ATA_PRIMARY].nIEN = 0x02;
	driver->channels[ATA_SECONDARY].nIEN = 0x02;
	ata_reg_write(driver, ATA_PRIMARY, ATA_REG_CONTROL, driver->channels[ATA_PRIMARY].nIEN);
	ata_reg_write(driver, ATA_SECONDARY, ATA_REG_CONTROL, driver->channels[ATA_SECONDARY].nIEN);

	// Probe for devices
	for(channel = 0; channel < 2; channel++) {
		// Disable for the channel
		driver->channels[channel].nIEN = 2;

		// Check each device on the channel
		for(device = 0; device < 2; device++) {
			uint8_t type = ATA_DEVICE_TYPE_ATA, status;
			bool isATAPI = false;

			// Set up some initial state
			driver->devices[count].drive_exists = false;
			driver->devices[count].dma_enabled = false;

			// Select drive
			ata_reg_write(driver, channel, ATA_REG_HDDEVSEL, 0xA0 | (device << 4));

			// Send IDENTIFY command
			ata_reg_write(driver, channel, ATA_REG_COMMAND, ATA_CMD_IDENTIFY);

			// Wait a while by writing to a bogus IO port
			io_wait();
			io_wait();

			// If the status register is 0, there is no device
			if(ata_reg_read(driver, channel, ATA_REG_STATUS) == 0) continue;

			// Wait until we read an error or DREQ
			for(;;) {
				// Read status register
				status = ata_reg_read(driver, channel, ATA_REG_STATUS);

				// If the error bit is set, we don't have an ATA device
				if ((status & ATA_SR_ERR)) {
					isATAPI = true;
					break;
				}

				// If BSY is clear and DREQ is set, we can read data
				if(!(status & ATA_SR_BSY) && (status & ATA_SR_DRQ)) break;
			}

			// If the drive is an ATAPI drive, send IDENTIFY PACKET command
			if(isATAPI) {
				uint8_t cl = ata_reg_read(driver, channel, ATA_REG_LBA1);
				uint8_t ch = ata_reg_read(driver, channel, ATA_REG_LBA2);

				if (cl == 0x14 && ch == 0xEB) type = ATA_DEVICE_TYPE_ATAPI;
				else if (cl == 0x69 && ch == 0x96) type = ATA_DEVICE_TYPE_ATAPI;
				else continue; // Unknown Type (may not be a device).

				ata_reg_write(driver, channel, ATA_REG_COMMAND, ATA_CMD_IDENTIFY_PACKET);

				// Initialise ATAPI buffer for this packet
				driver->devices[count].atapi_buffer[0] = 0xA8;
			}

			// Read Device identification block (512 bytes)
			ata_do_pio_read(driver, ide_buf, 256, channel);

			// Allocate a page of memory for the ATA output memory
			ata_info_t *info = (ata_info_t *) kmalloc(sizeof(ata_info_t));
			memclr(info, sizeof(ata_info_t));
			driver->devices[count].ata_info = info;

			// Save a copy of the device identification block
			memcpy(&info->ata_identify, ide_buf, 512);

			// Read device parameters
			driver->devices[count].drive_exists = true;
			driver->devices[count].type = type;
			driver->devices[count].channel = channel;
			driver->devices[count].drive = device;
			driver->devices[count].signature = *((uint16_t *) (ide_buf + ATA_IDENT_DEVICETYPE));
			driver->devices[count].capabilities = *((uint16_t *) (ide_buf + ATA_IDENT_CAPABILITIES));
			driver->devices[count].commandSets = *((uint32_t *) (ide_buf + ATA_IDENT_COMMANDSETS));

			// Get size of drive
			if (driver->devices[count].commandSets & (1 << 26)) {
				// 48-bit LBA (only the low 32 bits of the count are read here)
				driver->devices[count].size = *((uint32_t *) (ide_buf + ATA_IDENT_MAX_LBA_EXT));
			} else {
				// CHS or 28 bit addressing
				driver->devices[count].size = *((uint32_t *) (ide_buf + ATA_IDENT_MAX_LBA));
			}

			// Convert model string (IDENTIFY stores it byte-swapped per word)
			char *model_read = (char *) (ide_buf + ATA_IDENT_MODEL);
			for(k = 0; k < 40; k += 2) {
				driver->devices[count].model[k] = model_read[k+1];
				driver->devices[count].model[k+1] = model_read[k];
			}

			// Trim the model string (remove space padding)
			// NOTE(review): indices 40..42 were never written by the loop above —
			// verify the model[] field is at least 43 bytes and zero-initialised.
			for(int k = 42; k > 0; k--) {
				if(driver->devices[count].model[k] == 0x20 || driver->devices[count].model[k] == 0x00) {
					driver->devices[count].model[k] = 0x00;
				} else {
					break;
				}
			}

			// Does the drive support DMA?
			if(*((uint16_t *) (ide_buf + ATA_IDENT_FIELDVALID))) {
				// Get supported UDMA modes
				uint16_t udma_modes = *((uint16_t *) (ide_buf + ATA_IDENT_UDMASUPPORT));
				if(udma_modes & 0x01) { driver->devices[count].udma_supported = kATA_UDMA0; }
				if(udma_modes & 0x02) { driver->devices[count].udma_supported = kATA_UDMA1; }
				if(udma_modes & 0x04) { driver->devices[count].udma_supported = kATA_UDMA2; }
				if(udma_modes & 0x08) { driver->devices[count].udma_supported = kATA_UDMA3; }
				if(udma_modes & 0x10) { driver->devices[count].udma_supported = kATA_UDMA4; }
				if(udma_modes & 0x20) { driver->devices[count].udma_supported = kATA_UDMA5; }

				// No UDMA support
				// BUG FIX: the mask was 0x2F, which omits bit 4 and wrongly
				// cleared drives supporting only UDMA4; 0x3F covers UDMA0-5.
				if (!(udma_modes & 0x3F)) {
					driver->devices[count].udma_supported = kATA_UDMANone;
				}

				// Get supported multiword DMA modes
				uint16_t mwdma_modes = *((uint16_t *) (ide_buf + ATA_IDENT_MWDMASUPPORT));
				if(mwdma_modes & 0x01) { driver->devices[count].mwdma_supported = kATA_MWDMA0; }
				if(mwdma_modes & 0x02) { driver->devices[count].mwdma_supported = kATA_MWDMA1; }
				if(mwdma_modes & 0x04) { driver->devices[count].mwdma_supported = kATA_MWDMA2; }

				// No multiword DMA modes supported
				if (!(mwdma_modes & 0x07)) {
					driver->devices[count].mwdma_supported = kATA_MWDMANone;
				}

				// Get supported singleword DMA modes
				uint16_t swdma_modes = *((uint16_t *) (ide_buf + ATA_IDENT_SWDMASUPPORT));
				if(swdma_modes & 0x01) { driver->devices[count].swdma_supported = kATA_SWDMA0; }
				if(swdma_modes & 0x02) { driver->devices[count].swdma_supported = kATA_SWDMA1; }
				if(swdma_modes & 0x04) { driver->devices[count].swdma_supported = kATA_SWDMA2; }

				// No singleword DMA modes supported
				if (!(swdma_modes & 0x07)) {
					driver->devices[count].swdma_supported = kATA_SWDMANone;
				}

				// Get supported PIO modes with flow control
				// BUG FIX: the original's unconditional `else` on the PIO4 test
				// overwrote a PIO3-only result with PIO0; use an else-if chain.
				uint16_t pio_modes = *((uint16_t *) (ide_buf + ATA_IDENT_PIOSUPPORT));
				if(pio_modes & 0x02) {
					driver->devices[count].pio_supported = kATA_PIO4;
				} else if(pio_modes & 0x01) {
					driver->devices[count].pio_supported = kATA_PIO3;
				} else {
					// If there's no flow control support, fall back to PIO0
					driver->devices[count].pio_supported = kATA_PIO0;
				}

				// Read PIO transfer cycle lengths
				driver->devices[count].pio_cycle_len = *((uint16_t *) (ide_buf + ATA_IDENT_PIOCYC));
				driver->devices[count].pio_cycle_len_iordy = *((uint16_t *) (ide_buf + ATA_IDENT_PIOCYCIORDY));

				/*
				 * Multiword and singleword DMA are supported regardless of the
				 * cable used, so the best mode the drive supports is the one
				 * we prefer.
				 */
				driver->devices[count].mwdma_preferred = driver->devices[count].mwdma_supported;
				driver->devices[count].swdma_preferred = driver->devices[count].swdma_supported;
				driver->devices[count].pio_preferred = driver->devices[count].pio_supported;
			} else {
				driver->devices[count].udma_supported = kATA_UDMANone;
				driver->devices[count].mwdma_supported = kATA_MWDMANone;
			}

			// Set up a disk structure
			hal_disk_t *disk = hal_disk_alloc();
			ASSERT(disk);
			disk->f = ata_hal_disk_functions;
			disk->drive_number = count;
			disk->driver = driver;
			disk->interface = kDiskInterfacePATA;

			// If the drive is a hard disk, we know that it's got media loaded
			if(!isATAPI) {
				disk->type = kDiskTypeHardDrive;
				disk->media_loaded = true;
			} else {
				disk->type = kDiskTypeOptical;
			}

			// Get sleep status from ATA IDENTIFY response
			disk->sleep_enabled = false;

			// Register with volume manager
			hal_disk_register(disk);

			count++;
		}
	}

	for (int i = 0; i < 4; i++) {
		if (driver->devices[i].drive_exists == true) {
			if(driver->devices[i].type == ATA_DEVICE_TYPE_ATA) {
				// BUG FIX: the PIO field was read via devices[count], which is
				// one past the last detected device after the probe loop.
				KDEBUG("Found ATA drive (%s) of %u sectors (Multiword DMA %u, UDMA %u, PIO %u)", driver->devices[i].model, (unsigned int)(driver->devices[i].size & 0xFFFFFFFF), driver->devices[i].mwdma_supported, driver->devices[i].udma_supported, driver->devices[i].pio_supported);
			} else {
				KDEBUG("Found ATAPI drive (%s) of %u sectors", driver->devices[i].model, (unsigned int)(driver->devices[i].size & 0xFFFFFFFF));
			}
		}
	}

	// Clean up allocated memory
	kfree(ide_buf);

	IRQ_RES();

	ata_drivers_loaded++;
	return driver;
}
/* Every task that returns will end up here: */
/*
 * Termination trampoline: task_create forges the new task's stack so that a
 * returning entry function lands here, where the task kills itself.
 */
void task_return_grave(void) {
	IRQ_OFF(); /* Prevent switch context as soon as possible, so we don't lose 'current_task's address */
	irq_already_off = 1; /* This prevents IRQ_OFF to run twice (task_kill checks this flag) */
	task_kill(current_task->pid); /* Commit suicide */
	for(;;); /* Never return. Remember that this 'for' won't be actually running, we just don't want to run 'ret' */
}
/****************************** Task control ******************************/
/* Globally enables or disables task switching. */
void tasking_enable(char enable) {
	IRQ_OFF();
	/* Normalise to 0/1 so any non-zero argument enables switching */
	is_tasking = !!enable;
	IRQ_RES();
}
/*
 * Builds and issues a command to the SD controller. Encodes the response
 * type and (for data commands) the transfer type into the command word,
 * writes the argument and command registers, then busy-waits until the
 * controller accepts the command and — if `wait` is set — until the
 * command-response flag comes back. Returns 0 or -EINVAL for an
 * unsupported response type.
 */
static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
				struct mmc_command *cmd, struct mmc_data *data)
{
	u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);

	/* Encode the expected response format */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		break;
	case MMC_RSP_R1:
		mmccmd |= SD_CMD_RT_1;
		break;
	case MMC_RSP_R1B:
		mmccmd |= SD_CMD_RT_1B;
		break;
	case MMC_RSP_R2:
		mmccmd |= SD_CMD_RT_2;
		break;
	case MMC_RSP_R3:
		mmccmd |= SD_CMD_RT_3;
		break;
	default:
		printk(KERN_INFO "au1xmmc: unhandled response type %02x\n",
			mmc_resp_type(cmd));
		return -EINVAL;
	}

	/* Encode the transfer type: single vs multi block, read vs write */
	if (data) {
		if (data->flags & MMC_DATA_READ) {
			if (data->blocks > 1)
				mmccmd |= SD_CMD_CT_4;
			else
				mmccmd |= SD_CMD_CT_2;
		} else if (data->flags & MMC_DATA_WRITE) {
			if (data->blocks > 1)
				mmccmd |= SD_CMD_CT_3;
			else
				mmccmd |= SD_CMD_CT_1;
		}
	}

	au_writel(cmd->arg, HOST_CMDARG(host));
	au_sync();

	/* Mask the command-response IRQ while we poll for it ourselves */
	if (wait)
		IRQ_OFF(host, SD_CONFIG_CR);

	au_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
	au_sync();

	/* Wait for the command to go on the line */
	while (au_readl(HOST_CMD(host)) & SD_CMD_GO)
		/* nop */;

	/* Wait for the command to come back */
	if (wait) {
		u32 status = au_readl(HOST_STATUS(host));

		while (!(status & SD_STATUS_CR))
			status = au_readl(HOST_STATUS(host));

		/* Clear the CR status */
		au_writel(SD_STATUS_CR, HOST_STATUS(host));

		IRQ_ON(host, SD_CONFIG_CR);
	}

	return 0;
}
/*
 * Receive-direction PIO: drains bytes from the SD controller's RX FIFO into
 * the current scatterlist entry. Stops on an empty FIFO, a CRC error, an
 * over/underrun, the end of the entry, or the per-call transfer cap; when
 * the whole request is received, masks the RX IRQ and defers completion to
 * the data tasklet.
 */
static void au1xmmc_receive_pio(struct au1xmmc_host *host)
{
	struct mmc_data *data;
	int max, count, sg_len = 0;
	unsigned char *sg_ptr = NULL;
	u32 status, val;
	struct scatterlist *sg;

	data = host->mrq->data;

	if (!(host->flags & HOST_F_RECV))
		return;

	max = host->pio.len;

	/* Only dereference the scatterlist while entries remain; past the end
	 * we still drain the FIFO but discard the bytes (sg_ptr stays NULL) */
	if (host->pio.index < host->dma.len) {
		sg = &data->sg[host->pio.index];
		sg_ptr = sg_virt(sg) + host->pio.offset;

		/* This is the space left inside the buffer */
		sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;

		/* Check if we need less than the size of the sg_buffer */
		if (sg_len < max)
			max = sg_len;
	}

	if (max > AU1XMMC_MAX_TRANSFER)
		max = AU1XMMC_MAX_TRANSFER;

	for (count = 0; count < max; count++) {
		status = au_readl(HOST_STATUS(host));

		/* FIFO empty: nothing more to read this pass */
		if (!(status & SD_STATUS_NE))
			break;

		if (status & SD_STATUS_RC) {
			DBG("RX CRC Error [%d + %d].\n", host->pdev->id,
					host->pio.len, count);
			break;
		}

		if (status & SD_STATUS_RO) {
			DBG("RX Overrun [%d + %d]\n", host->pdev->id,
					host->pio.len, count);
			break;
		}
		else if (status & SD_STATUS_RU) {
			DBG("RX Underrun [%d + %d]\n", host->pdev->id,
					host->pio.len, count);
			break;
		}

		val = au_readl(HOST_RXPORT(host));

		if (sg_ptr)
			*sg_ptr++ = (unsigned char)(val & 0xFF);
	}

	host->pio.len -= count;
	host->pio.offset += count;

	/* Entry fully consumed: advance to the next scatterlist entry */
	if (sg_len && count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}

	/* Whole request received: mask the RX IRQ and hand off to the tasklet */
	if (host->pio.len == 0) {
		/* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */
		IRQ_OFF(host, SD_CONFIG_NE);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
	}
}
static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, struct mmc_command *cmd) { u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT); switch(cmd->flags) { case MMC_RSP_R1: mmccmd |= SD_CMD_RT_1; break; case MMC_RSP_R1B: mmccmd |= SD_CMD_RT_1B; break; case MMC_RSP_R2: mmccmd |= SD_CMD_RT_2; break; case MMC_RSP_R3: mmccmd |= SD_CMD_RT_3; break; } switch(cmd->opcode) { case MMC_READ_SINGLE_BLOCK: case 51: mmccmd |= SD_CMD_CT_2; break; case MMC_READ_MULTIPLE_BLOCK: mmccmd |= SD_CMD_CT_4; break; case MMC_WRITE_BLOCK: mmccmd |= SD_CMD_CT_1; break; case MMC_WRITE_MULTIPLE_BLOCK: mmccmd |= SD_CMD_CT_3; break; case MMC_STOP_TRANSMISSION: mmccmd |= SD_CMD_CT_7; break; } au_writel(cmd->arg, HOST_CMDARG(host)); au_sync(); if (wait) IRQ_OFF(host, SD_CONFIG_CR); au_writel((mmccmd | SD_CMD_GO), HOST_CMD(host)); au_sync(); /* Wait for the command to go on the line */ while(1) { if (!(au_readl(HOST_CMD(host)) & SD_CMD_GO)) break; } /* Wait for the command to come back */ if (wait) { u32 status = au_readl(HOST_STATUS(host)); while(!(status & SD_STATUS_CR)) status = au_readl(HOST_STATUS(host)); /* Clear the CR status */ au_writel(SD_STATUS_CR, HOST_STATUS(host)); IRQ_ON(host, SD_CONFIG_CR); } return MMC_ERR_NONE; }