static uint32_t mmc_zero_out(struct mmc_device* dev, uint32_t blk_addr, uint32_t num_blks)
{
	uint32_t *out;
	uint32_t block_size = mmc_get_device_blocksize();
	uint32_t erase_size = (block_size * num_blks);
	uint32_t scratch_size = target_get_max_flash_size();

	dprintf(INFO, "erasing 0x%x:0x%x\n", blk_addr, num_blks);

	if (erase_size <= scratch_size)
	{
		/* Use the scratch region as the source of zero-filled blocks */
		out = (uint32_t *) target_get_scratch_address();
	}
	else
	{
		dprintf(CRITICAL, "Erase fail: erase size %u is bigger than scratch region %u\n",
				erase_size, scratch_size);
		return 1;
	}

	memset((void *)out, 0, erase_size);

	/* Flush the data to memory before writing to storage */
	arch_clean_invalidate_cache_range((addr_t) out, erase_size);

	if (mmc_sdhci_write(dev, out, blk_addr, num_blks))
	{
		dprintf(CRITICAL, "failed to erase the partition: %x\n", blk_addr);
		return 1;
	}

	return 0;
}
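A minimal usage sketch of mmc_zero_out(), assuming it is called from the same file (the function is static) and that the LK partition-table helpers partition_get_index()/partition_get_offset() are available; the caller-supplied partition name is hypothetical:

static int erase_partition_start(const char *name, uint32_t num_blks)
{
	struct mmc_device *dev = (struct mmc_device *) target_mmc_device();
	uint32_t block_size = mmc_get_device_blocksize();
	int index = partition_get_index(name);
	uint64_t offset;

	if (index == INVALID_PTN)
		return -1;

	offset = partition_get_offset(index);

	/* mmc_zero_out() takes a block address, not a byte offset */
	return mmc_zero_out(dev, (uint32_t)(offset / block_size), num_blks) ? -1 : 0;
}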
/*
 * Function: mmc_read
 * Arg     : Data address on card, o/p buffer & data length
 * Return  : 0 on success, non zero on failure
 * Flow    : Read data from the card into out
 */
uint32_t mmc_read(uint64_t data_addr, uint32_t *out, uint32_t data_len)
{
	uint32_t ret = 0;
	uint32_t block_size;
	uint32_t read_size = SDHCI_ADMA_MAX_TRANS_SZ;
	void *dev;
	uint8_t *sptr = (uint8_t *)out;

	dev = target_mmc_device();
	block_size = mmc_get_device_blocksize();

	ASSERT(!(data_addr % block_size));
	ASSERT(!(data_len % block_size));

	/*
	 * DMA into write-back memory is unsafe/non-portable, but callers
	 * of this routine normally provide write-back buffers. Clean and
	 * invalidate the cache before reading data from the mmc.
	 */
	arch_clean_invalidate_cache_range((addr_t)(out), data_len);

	if (platform_boot_dev_isemmc())	/* boot device is eMMC; otherwise UFS */
	{
		/* TODO: This function is aware of the max data that can be
		 * transferred using sdhci adma mode. A cleaner implementation
		 * should keep this function independent of sdhci limitations.
		 */
		while (data_len > read_size)
		{
			ret = mmc_sdhci_read((struct mmc_device *)dev, (void *)sptr,
					     (data_addr / block_size), (read_size / block_size));
			if (ret)
			{
				dprintf(CRITICAL, "Failed reading block @ %x\n",
						(unsigned int)(data_addr / block_size));
				return ret;
			}
			sptr += read_size;
			data_addr += read_size;
			data_len -= read_size;
		}

		if (data_len)
			ret = mmc_sdhci_read((struct mmc_device *)dev, (void *)sptr,
					     (data_addr / block_size), (data_len / block_size));
		if (ret)
			dprintf(CRITICAL, "Failed reading block @ %x\n",
					(unsigned int)(data_addr / block_size));
	}
	else
	{
		ret = ufs_read((struct ufs_dev *) dev, data_addr, (addr_t)out, (data_len / block_size));
		if (ret)
		{
			dprintf(CRITICAL, "Error: UFS read failed for block: %llu\n", data_addr);
		}
		arch_invalidate_cache_range((addr_t)out, data_len);
	}

	return ret;
}
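A usage sketch for mmc_read(), allocating a cache-line-aligned buffer with memalign() and the CACHE_LINE/ROUNDUP helpers used elsewhere in this codebase; the start block is chosen by the caller:

static int read_one_block(uint64_t start_block, uint32_t **out_buf)
{
	uint32_t block_size = mmc_get_device_blocksize();
	uint32_t *buf = memalign(CACHE_LINE, ROUNDUP(block_size, CACHE_LINE));

	if (!buf)
		return -1;

	/* Both the byte address and the length must be block aligned */
	if (mmc_read(start_block * block_size, buf, block_size))
	{
		free(buf);
		return -1;
	}

	*out_buf = buf;
	return 0;
}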
/* This API calls into the TZ app to read device_info */
int read_device_info_rpmb(void *info, uint32_t sz)
{
	int ret = 0;
	struct send_cmd_req read_req = {0};
	struct send_cmd_rsp read_rsp = {0};

	read_req.cmd_id = KEYMASTER_READ_LK_DEVICE_STATE;
	read_req.data = (uint32_t) info;
	read_req.len = sz;

	read_rsp.cmd_id = CLIENT_CMD_READ_LK_DEVICE_STATE;

	/* Read the device info */
	arch_clean_invalidate_cache_range((addr_t) info, sz);
	ret = qseecom_send_command(get_secapp_handle(), (void*) &read_req, sizeof(read_req),
				   (void*) &read_rsp, sizeof(read_rsp));
	arch_invalidate_cache_range((addr_t) info, sz);

	if (ret < 0 || read_rsp.status < 0)
	{
		dprintf(CRITICAL, "Reading device info failed: Error: %d\n", read_rsp.status);
		/* Don't report a transport error (ret < 0, status possibly 0) as success */
		return ret < 0 ? ret : read_rsp.status;
	}

	return 0;
}
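A usage sketch for read_device_info_rpmb(); the struct device_info name is an assumption standing in for whatever state blob the caller persists, and the buffer is heap allocated so it can be cache-line aligned for the TZ round trip:

static int load_device_state(struct device_info **out)
{
	struct device_info *info = memalign(CACHE_LINE,
					    ROUNDUP(sizeof(struct device_info), CACHE_LINE));
	int ret;

	if (!info)
		return -1;

	ret = read_device_info_rpmb(info, sizeof(struct device_info));
	if (ret)
	{
		free(info);
		return ret;
	}

	*out = info;
	return 0;
}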
static void crypto_add_cmd_element(struct crypto_dev *dev, uint32_t addr, uint32_t val)
{
	struct cmd_element *ptr = dev->ce_array;

	bam_add_cmd_element(&(ptr[dev->ce_array_index]), addr, val, CE_WRITE_TYPE);

	/* The BAM fetches the command element over DMA; push it out of the cache */
	arch_clean_invalidate_cache_range((addr_t) &(ptr[dev->ce_array_index]),
					  sizeof(struct cmd_element));

	dev->ce_array_index++;
}
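A sketch of how this helper is typically used: elements are batched into dev->ce_array, each one flushed as it is written, and the whole array is later queued to the BAM pipe as a single command descriptor. The CRYPTO_CONFIG()/CRYPTO_GOPROC() register macros are assumed names used for illustration:

static void crypto_queue_config(struct crypto_dev *dev)
{
	dev->ce_array_index = 0;	/* start a fresh batch */

	/* Hypothetical register writes; real values come from the crypto-engine spec */
	crypto_add_cmd_element(dev, CRYPTO_CONFIG(dev->base), 0x1);
	crypto_add_cmd_element(dev, CRYPTO_GOPROC(dev->base), 0x1);

	/* ... the accumulated ce_array is then handed to the BAM as one descriptor ... */
}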
int mipi_dsi_cmds_tx(struct mipi_dsi_cmd *cmds, int count)
{
	int ret = 0;
	struct mipi_dsi_cmd *cm;
	int i = 0;
	char pload[256];
	uint32_t off;

	/* Align pload at an 8 byte boundary */
	off = (uint32_t) pload;
	off &= 0x07;
	if (off)
		off = 8 - off;
	off += (uint32_t) pload;

	cm = cmds;
	for (i = 0; i < count; i++)
	{
		/* Wait for VIDEO_MODE_DONE */
		ret = mdss_dsi_wait4_video_done();
		if (ret)
			goto mipi_cmds_error;

		memcpy((void *)off, (cm->payload), cm->size);
		/* Flush the payload so the command DMA engine sees it */
		arch_clean_invalidate_cache_range((addr_t)(off), cm->size);
		writel(off, DSI_DMA_CMD_OFFSET);
		writel(cm->size, DSI_DMA_CMD_LENGTH);	/* reg 0x48 for this build */
		dsb();
		ret += dsi_cmd_dma_trigger_for_panel();
		dsb();
		if (cm->wait)
			mdelay(cm->wait);
		else
			udelay(80);
		cm++;
	}

mipi_cmds_error:
	return ret;
}
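For context, a sketch of a command table this routine consumes: a two-command panel-on sequence using DCS short writes. The packet bytes follow the usual LK panel-file layout but are illustrative; the {size, payload, wait} field order is exactly what the loop above dereferences:

static char exit_sleep[4] = { 0x11, 0x00, 0x05, 0x80 };	/* DCS exit_sleep_mode */
static char display_on[4] = { 0x29, 0x00, 0x05, 0x80 };	/* DCS set_display_on */

static struct mipi_dsi_cmd panel_on_cmds[] = {
	{ sizeof(exit_sleep), exit_sleep, 120 },	/* panels commonly need ~120 ms after sleep-out */
	{ sizeof(display_on), display_on, 10 },
};

/* ... mipi_dsi_cmds_tx(panel_on_cmds, ARRAY_SIZE(panel_on_cmds)); ... */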
static uint32_t crypto_write_reg(struct bam_instance *bam_core, uint32_t reg_addr,
				 uint32_t val, uint8_t flags)
{
	uint32_t ret = 0;
	struct cmd_element cmd_list_ptr;

#ifdef CRYPTO_REG_ACCESS
	writel(val, reg_addr);
#else
	bam_add_cmd_element(&cmd_list_ptr, reg_addr, val, CE_WRITE_TYPE);

	/* The BAM fetches the command element over DMA; flush it first */
	arch_clean_invalidate_cache_range((addr_t)&cmd_list_ptr, sizeof(struct cmd_element));

	/* Enqueue the desc for the above command */
	ret = bam_add_one_desc(bam_core,
			       CRYPTO_WRITE_PIPE_INDEX,
			       (unsigned char*)PA((addr_t)&cmd_list_ptr),
			       BAM_CE_SIZE,
			       BAM_DESC_CMD_FLAG | BAM_DESC_INT_FLAG | flags);
	if (ret)
	{
		dprintf(CRITICAL, "CRYPTO_WRITE_REG: Reg write failed. reg addr = %x\n", reg_addr);
		goto crypto_write_reg_err;
	}

	crypto_wait_for_cmd_exec(bam_core, 1, CRYPTO_WRITE_PIPE_INDEX);
#endif

crypto_write_reg_err:
	return ret;
}
int mdss_dsi_cmds_tx(struct mipi_panel_info *mipi, struct mipi_dsi_cmd *cmds,
		     int count, char dual_dsi)
{
	int ret = 0;
#if (DISPLAY_TYPE_MDSS == 1)
	struct mipi_dsi_cmd *cm;
	int i = 0;
	uint8_t pload[256];
	uint32_t off;
	uint32_t size;
	uint32_t ctl_base, sctl_base;

	/* If the dest controller is not specified, default to DSI0 */
	if (!mipi)
	{
		ctl_base = MIPI_DSI0_BASE;
		sctl_base = MIPI_DSI1_BASE;
	}
	else
	{
		ctl_base = mipi->ctl_base;
		sctl_base = mipi->sctl_base;
	}

	/* Align pload at an 8 byte boundary */
	off = (uint32_t) pload;
	off &= 0x07;
	if (off)
		off = 8 - off;
	off += (uint32_t) pload;

	cm = cmds;
	for (i = 0; i < count; i++)
	{
		/* Wait for VIDEO_MODE_DONE */
		ret = mdss_dsi_wait4_video_done(ctl_base);
		if (ret)
			goto wait4video_error;

		/* The payload size has to be a multiple of 4 */
		size = cm->size;
		size &= 0x03;
		if (size)
			size = 4 - size;
		size += cm->size;

		memcpy((uint8_t *)off, (cm->payload), size);
		arch_clean_invalidate_cache_range((addr_t)(off), size);
		writel(off, ctl_base + DMA_CMD_OFFSET);
		writel(size, ctl_base + DMA_CMD_LENGTH);
		if (dual_dsi)
		{
			writel(off, sctl_base + DMA_CMD_OFFSET);
			writel(size, sctl_base + DMA_CMD_LENGTH);
		}
		dsb();
		ret += mdss_dsi_cmd_dma_trigger_for_panel(dual_dsi, ctl_base, sctl_base);
		if (cm->wait)
			mdelay(cm->wait);
		else
			udelay(80);
		cm++;
	}

wait4video_error:
#endif
	return ret;
}
void platform_early_init(void)
{
#if 0
	ps7_init();
#else
	/* Unlock the registers and leave them that way */
	zynq_slcr_unlock();
	zynq_mio_init();
	zynq_pll_init();
	zynq_clk_init();
#if ZYNQ_SDRAM_INIT
	zynq_ddr_init();
#endif
#endif

	/* Enable all level shifters */
	SLCR_REG(LVL_SHFTR_EN) = 0xF;

	/* FPGA SW reset (not documented, but mandatory) */
	SLCR_REG(FPGA_RST_CTRL) = 0x0;

	/* The zynq manual says this is mandatory for cache init */
	*REG32(SLCR_BASE + 0xa1c) = 0x020202;

	/* early initialize the uart so we can printf */
	uart_init_early();

	/* initialize the interrupt controller */
	arm_gic_init();

	zynq_gpio_init();

	/* initialize the timer block */
	arm_cortex_a9_timer_init(CPUPRIV_BASE, zynq_get_arm_timer_freq());

	/* bump the 2nd cpu into our code space and remap the top SRAM block */
	if (KERNEL_LOAD_OFFSET != 0) {
		/* construct a trampoline to get the 2nd cpu up to the trap routine */

		/* figure out the offset of the trampoline routine in physical space from address 0 */
		extern void platform_reset(void);
		addr_t tramp = (addr_t)&platform_reset;
		tramp -= KERNEL_BASE;
		tramp += MEMBASE;

		/* stuff in a "ldr pc, [next address]" and the target address */
		uint32_t *ptr = (uint32_t *)KERNEL_BASE;

		ptr[0] = 0xe51ff004; // ldr pc, [pc, #-4]
		ptr[1] = tramp;
		arch_clean_invalidate_cache_range((addr_t)ptr, 8);
	}

	/* reset the 2nd cpu, letting it go through its reset vector (at 0x0 physical) */
	SLCR_REG(A9_CPU_RST_CTRL) |= (1<<1); // reset cpu 1
	spin(10);
	SLCR_REG(A9_CPU_RST_CTRL) &= ~(1<<1); // unreset cpu 1

	/* wait for the 2nd cpu to reset, go through the usual reset vector, and get trapped by our code */
	/* see platform/zynq/reset.S */
	extern volatile int __cpu_trapped;
	uint count = 100000;
	while (--count) {
		arch_clean_invalidate_cache_range((addr_t)&__cpu_trapped, sizeof(__cpu_trapped));

		if (__cpu_trapped != 0)
			break;
	}

	if (count == 0) {
		panic("ZYNQ: failed to trap 2nd cpu\n");
	}

	/* bounce the 4th sram region down to a lower address */
	SLCR_REG(OCM_CFG) &= ~0xf; /* all banks at low address */

	/* add the main memory arena */
#if !ZYNQ_CODE_IN_SDRAM && SDRAM_SIZE != 0
	/* In the case of running from SRAM while using SDRAM, there is a
	 * discontinuity between the end of SRAM (256K) and the start of SDRAM (1MB),
	 * so intentionally bump the boot-time allocator to start at the base of SDRAM.
	 */
	extern uintptr_t boot_alloc_start;
	extern uintptr_t boot_alloc_end;

	boot_alloc_start = KERNEL_BASE + MB;
	boot_alloc_end = KERNEL_BASE + MB;
#endif

#if SDRAM_SIZE != 0
	pmm_add_arena(&sdram_arena);
#endif
	pmm_add_arena(&sram_arena);
}
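For reference, how CPU1 sees the two-word trampoline when it comes out of reset at physical address 0; the instruction encoding is standard ARM, nothing here is Zynq specific:

/*
 *   0x00000000:  e51ff004	ldr pc, [pc, #-4]
 *   0x00000004:  <tramp>	physical address of platform_reset
 *
 * On ARM, pc reads as the address of the current instruction plus 8,
 * so [pc, #-4] dereferences the word at 0x4 and the load jumps straight
 * to the trampoline target. The cache clean after writing ptr[0..1] is
 * what makes the words visible to CPU1, which starts fetching from
 * memory with its caches disabled.
 */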
/*
 * Assumes that TDs allocated already are not freed.
 * But it can handle the case where TDs are freed as well.
 */
int udc_request_queue(struct udc_endpoint *ept, struct udc_request *_req)
{
	unsigned xfer = 0;
	struct ept_queue_item *item, *curr_item;
	struct usb_request *req = (struct usb_request *)_req;
	unsigned phys = (unsigned)req->req.buf;
	unsigned len = req->req.length;
	unsigned int count = 0;

	curr_item = NULL;
	xfer = (len > MAX_TD_XFER_SIZE) ? MAX_TD_XFER_SIZE : len;

	/*
	 * First TD allocated during request allocation
	 */
	item = req->item;
	item->info = INFO_BYTES(xfer) | INFO_ACTIVE;
	item->page0 = phys;
	item->page1 = (phys & 0xfffff000) + 0x1000;
	item->page2 = (phys & 0xfffff000) + 0x2000;
	item->page3 = (phys & 0xfffff000) + 0x3000;
	item->page4 = (phys & 0xfffff000) + 0x4000;
	phys += xfer;
	curr_item = item;
	len -= xfer;

	/*
	 * If the transfer length is more than one TD can accommodate,
	 * add more transfer descriptors.
	 */
	while (len > 0)
	{
		xfer = (len > MAX_TD_XFER_SIZE) ? MAX_TD_XFER_SIZE : len;
		if (curr_item->next == TERMINATE)
		{
			/*
			 * Allocate a new TD only if the chain does not
			 * already exist.
			 */
			item = memalign(CACHE_LINE, ROUNDUP(sizeof(struct ept_queue_item), CACHE_LINE));
			if (!item)
			{
				dprintf(ALWAYS, "allocate USB item fail: ept%d %s queue, td count = %d\n",
						ept->num, ept->in ? "in" : "out", count);
				return -1;
			}
			else
			{
				count++;
				curr_item->next = PA(item);
				item->next = TERMINATE;
			}
		}
		else
		{
			/* The next TD in the chain already exists */
			item = (struct ept_queue_item *) VA(curr_item->next);
		}

		/* Update the TD with the transfer information */
		item->info = INFO_BYTES(xfer) | INFO_ACTIVE;
		item->page0 = phys;
		item->page1 = (phys & 0xfffff000) + 0x1000;
		item->page2 = (phys & 0xfffff000) + 0x2000;
		item->page3 = (phys & 0xfffff000) + 0x3000;
		item->page4 = (phys & 0xfffff000) + 0x4000;

		curr_item = item;
		len -= xfer;
		phys += xfer;
	}

	/* Terminate and set the interrupt bit for the last TD */
	curr_item->next = TERMINATE;
	curr_item->info |= INFO_IOC;

	enter_critical_section();
	ept->head->next = PA(req->item);
	ept->head->info = 0;
	ept->req = req;
	arch_clean_invalidate_cache_range((addr_t) ept, sizeof(struct udc_endpoint));
	arch_clean_invalidate_cache_range((addr_t) ept->head, sizeof(struct ept_queue_head));
	arch_clean_invalidate_cache_range((addr_t) ept->req, sizeof(struct usb_request));
	arch_clean_invalidate_cache_range((addr_t) VA(req->req.buf), req->req.length);

	/* Write all TDs to memory from the cache; note the chain holds
	 * physical addresses, so translate before following a link. */
	item = req->item;
	while (item != NULL)
	{
		curr_item = item;
		if (curr_item->next == TERMINATE)
			item = NULL;
		else
			item = (struct ept_queue_item *) VA(curr_item->next);
		arch_clean_invalidate_cache_range((addr_t) curr_item, sizeof(struct ept_queue_item));
	}

	DBG("ept%d %s queue req=%p\n", ept->num, ept->in ? "in" : "out", req);
	writel(ept->bit, USB_ENDPTPRIME);
	exit_critical_section();

	return 0;
}
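A usage sketch for queueing a transfer, assuming the standard LK udc API (udc_request_alloc() plus the buf/length/complete fields on struct udc_request); the buffer must be DMA-safe:

static void tx_complete(struct udc_request *req, unsigned actual, int status)
{
	/* ... signal the waiter and reclaim req->buf ... */
}

static int queue_tx(struct udc_endpoint *ept, void *buf, unsigned len)
{
	struct udc_request *req = udc_request_alloc();

	if (!req)
		return -1;

	req->buf = buf;		/* should be cache-line aligned for DMA */
	req->length = len;
	req->complete = tx_complete;

	return udc_request_queue(ept, req);
}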
/**
 * Put an image at a specific (x,y) location on the screen.
 * Duplicated from fbcon.c, with modifications to allow an (x,y) location
 * (instead of a centered image) and to display BMP images properly
 * (the order of copying the lines to the screen was reversed).
 */
static void fbcon_putImage_in_location(struct mdtp_fbimage *fbimg, uint32_t x, uint32_t y)
{
	unsigned i = 0;
	unsigned bytes_per_bpp;
	unsigned image_base;
	unsigned width, pitch, height;
	unsigned char *logo_base = NULL;

	if (!fb_config)
	{
		dprintf(CRITICAL, "ERROR: NULL configuration, image cannot be displayed\n");
		return;
	}

	if (fbimg)
	{
		width = pitch = fbimg->width;
		height = fbimg->height;
		logo_base = (unsigned char *)fbimg->image;
	}
	else
	{
		dprintf(CRITICAL, "ERROR: invalid image struct\n");
		return;
	}

	bytes_per_bpp = ((fb_config->bpp) / BITS_PER_BYTE);

#if DISPLAY_TYPE_MIPI
	if (bytes_per_bpp == 3)
	{
		if (fbimg->width == fb_config->width && fbimg->height == fb_config->height)
		{
			dprintf(CRITICAL, "ERROR: full screen image, cannot be displayed\n");
			return;
		}

		if (fbimg->width > fb_config->width || fbimg->height > fb_config->height ||
			(x > (fb_config->width - fbimg->width)) ||
			(y > (fb_config->height - fbimg->height)))
		{
			dprintf(CRITICAL, "ERROR: invalid image size, larger than the screen or exceeds its margins\n");
			return;
		}

		image_base = ((y * (fb_config->width)) + x);

		/* BMP rows are stored bottom-up, so copy the lines in reverse order */
		for (i = 0; i < height; i++)
		{
			memcpy(fb_config->base + ((image_base + (i * (fb_config->width))) * bytes_per_bpp),
			       logo_base + ((height - 1 - i) * pitch * bytes_per_bpp),
			       width * bytes_per_bpp);
		}
	}
	else
	{
		dprintf(CRITICAL, "ERROR: invalid bpp value\n");
		display_error_msg(); /* This will never return */
	}

	/* Flush the contents to memory before handing the data to the DMA */
	arch_clean_invalidate_cache_range((addr_t) fb_config->base,
					  (fb_config->height * fb_config->width * bytes_per_bpp));

	fbcon_flush();

#if DISPLAY_MIPI_PANEL_NOVATEK_BLUE
	if (is_cmd_mode_enabled())
		mipi_dsi_cmd_mode_trigger();
#endif

#endif
}
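A usage sketch that centers an image horizontally at a caller-chosen vertical offset; fb_config is the module-level framebuffer configuration the function above already depends on:

static void display_image_centered(struct mdtp_fbimage *fbimg, uint32_t y)
{
	uint32_t x = 0;

	if (fb_config && fbimg && fbimg->width <= fb_config->width)
		x = (fb_config->width - fbimg->width) / 2;

	fbcon_putImage_in_location(fbimg, x, y);
}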
/*
 * Function: mmc_write
 * Arg     : Data address on card, data length, i/p buffer
 * Return  : 0 on success, non zero on failure
 * Flow    : Write the data from in to the card
 */
uint32_t mmc_write(uint64_t data_addr, uint32_t data_len, void *in)
{
	uint32_t val = 0;
	int ret = 0;
	uint32_t block_size = 0;
	uint32_t write_size = SDHCI_ADMA_MAX_TRANS_SZ;
	uint8_t *sptr = (uint8_t *)in;
	void *dev;

	dev = target_mmc_device();
	block_size = mmc_get_device_blocksize();

	ASSERT(!(data_addr % block_size));

	if (data_len % block_size)
		data_len = ROUNDUP(data_len, block_size);

	/*
	 * Flush the cache before handing over the data to the
	 * storage driver.
	 */
	arch_clean_invalidate_cache_range((addr_t)in, data_len);

	if (platform_boot_dev_isemmc())	/* boot device is eMMC; otherwise UFS */
	{
		/* TODO: This function is aware of the max data that can be
		 * transferred using sdhci adma mode. A cleaner implementation
		 * should keep this function independent of sdhci limitations.
		 */
		while (data_len > write_size)
		{
			val = mmc_sdhci_write((struct mmc_device *)dev, (void *)sptr,
					      (data_addr / block_size), (write_size / block_size));
			if (val)
			{
				dprintf(CRITICAL, "Failed writing block @ %x\n",
						(unsigned int)(data_addr / block_size));
				return val;
			}
			sptr += write_size;
			data_addr += write_size;
			data_len -= write_size;
		}

		if (data_len)
			val = mmc_sdhci_write((struct mmc_device *)dev, (void *)sptr,
					      (data_addr / block_size), (data_len / block_size));
		if (val)
			dprintf(CRITICAL, "Failed writing block @ %x\n",
					(unsigned int)(data_addr / block_size));
	}
	else
	{
		ret = ufs_write((struct ufs_dev *)dev, data_addr, (addr_t)in, (data_len / block_size));
		if (ret)
		{
			dprintf(CRITICAL, "Error: UFS write failed writing to block: %llu\n", data_addr);
			val = 1;
		}
	}

	return val;
}
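A usage sketch for mmc_write(). Because the function rounds data_len up to the block size before flushing and writing, the buffer itself must be allocated at the rounded-up size or the trailing bytes would be read from past its end; the staging copy below avoids that:

static int write_blob(uint64_t byte_addr, const void *data, uint32_t len)
{
	uint32_t block_size = mmc_get_device_blocksize();
	uint32_t padded = ROUNDUP(len, block_size);
	uint8_t *buf = memalign(CACHE_LINE, ROUNDUP(padded, CACHE_LINE));
	uint32_t ret;

	if (!buf)
		return -1;

	memset(buf, 0, padded);
	memcpy(buf, data, len);

	ret = mmc_write(byte_addr, padded, buf);

	free(buf);
	return ret ? -1 : 0;
}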
int ucs_do_scsi_read(struct ufs_dev *dev, struct scsi_rdwr_req *req)
{
	STACKBUF_DMA_ALIGN(cdb, sizeof(struct scsi_rdwr_cdb));
	struct scsi_req_build_type req_upiu;
	struct scsi_rdwr_cdb *cdb_param;
	uint32_t blks_remaining;
	uint16_t blks_to_transfer;
	uint64_t bytes_to_transfer;
	uint32_t start_blk;
	uint32_t buf;

	blks_remaining = req->num_blocks;
	buf = req->data_buffer_base;
	start_blk = req->start_lba;
	cdb_param = (struct scsi_rdwr_cdb*) cdb;

	while (blks_remaining)
	{
		if (blks_remaining <= SCSI_MAX_DATA_TRANS_BLK_LEN)
		{
			blks_to_transfer = blks_remaining;
			blks_remaining = 0;
		}
		else
		{
			blks_to_transfer = SCSI_MAX_DATA_TRANS_BLK_LEN;
			blks_remaining -= SCSI_MAX_DATA_TRANS_BLK_LEN;
		}

		bytes_to_transfer = blks_to_transfer * UFS_DEFAULT_SECTORE_SIZE;

		/* Fill in the READ(10) CDB; lba and transfer length are big endian */
		memset(cdb_param, 0, sizeof(struct scsi_rdwr_cdb));

		cdb_param->opcode    = SCSI_CMD_READ10;
		cdb_param->cdb1      = SCSI_READ_WRITE_10_CDB1(0, 0, 1, 0);
		cdb_param->lba       = BE32(start_blk);
		cdb_param->trans_len = BE16(blks_to_transfer);

		dsb();
		arch_clean_invalidate_cache_range((addr_t) cdb_param, sizeof(struct scsi_rdwr_cdb));

		memset(&req_upiu, 0, sizeof(struct scsi_req_build_type));

		req_upiu.cdb              = (addr_t) cdb_param;
		req_upiu.data_buffer_addr = buf;
		req_upiu.data_len         = bytes_to_transfer;
		req_upiu.flags            = UPIU_FLAGS_READ;
		req_upiu.lun              = req->lun;
		req_upiu.dd               = UTRD_TARGET_TO_SYSTEM;

		if (ucs_do_scsi_cmd(dev, &req_upiu))
		{
			dprintf(CRITICAL, "ucs_do_scsi_read: failed\n");
			return -UFS_FAILURE;
		}

		buf += bytes_to_transfer;
		/* Advance by the blocks actually transferred this iteration */
		start_blk += blks_to_transfer;
	}

	return UFS_SUCCESS;
}
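A usage sketch, assuming struct scsi_rdwr_req carries exactly the fields consumed above (start_lba, num_blocks, data_buffer_base, lun); LUN 0 is an example value:

static int ufs_read_blocks(struct ufs_dev *dev, uint32_t lba,
			   uint32_t num_blocks, uint32_t buf)
{
	struct scsi_rdwr_req req;

	memset(&req, 0, sizeof(req));

	req.start_lba        = lba;
	req.num_blocks       = num_blocks;
	req.data_buffer_base = buf;	/* must be DMA-safe */
	req.lun              = 0;

	return ucs_do_scsi_read(dev, &req);
}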