/* IRQ handler for the OCMEM data mover and data clear engines */
static irqreturn_t ocmem_dm_irq_handler(int irq, void *dev_id)
{
	unsigned status;
	unsigned irq_status;

	status = ocmem_read(dm_base + DM_GEN_STATUS);
	irq_status = ocmem_read(dm_base + DM_INT_STATUS);
	pr_debug("irq:dm_status %x irq_status %x\n", status, irq_status);

	if (irq_status & BIT(0)) {
		pr_debug("Data mover completed\n");
		ocmem_write(BIT(0), dm_base + DM_INTR_CLR);
		pr_debug("Last re-mapped address block %x\n",
				ocmem_read(br_base + BR_LAST_ADDR));
		complete(&dm_transfer_event);
	} else if (irq_status & BIT(1)) {
		pr_debug("Data clear engine completed\n");
		ocmem_write(BIT(1), dm_base + DM_INTR_CLR);
		complete(&dm_clear_event);
	} else {
		/* Neither engine raised this interrupt */
		BUG_ON(1);
	}
	return IRQ_HANDLED;
}
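/*
 * read_hw_region_state() returns the raw power state of a region as read
 * from its PSCGC control register, or -EINVAL if the region index is out
 * of range. It always goes to the hardware, unlike read_region_state()
 * below, which returns the cached state when RPM power control is in use.
 */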
static int read_hw_region_state(unsigned region_num)
{
	int state;

	pr_debug("read_hw_region_state: #: %d\n", region_num);

	if (region_num >= num_regions)
		return -EINVAL;

	state = ocmem_read(ocmem_base + PSCGC_CTL_n(region_num));
	pr_debug("ocmem: region (%d) state %x\n", region_num, state);
	return state;
}
/* Must be called with region mutex held */
static int read_region_state(unsigned region_num)
{
	int state;

	pr_debug("read_region_state: #: %d\n", region_num);

	if (region_num >= num_regions)
		return -EINVAL;

	if (rpm_power_control)
		state = region_ctrl[region_num].r_state;
	else
		state = ocmem_read(ocmem_base + PSCGC_CTL_n(region_num));

	pr_debug("ocmem: region (%d) state %x\n", region_num, state);
	return state;
}
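/*
 * ocmem_rdm_transfer() programs one BR/DM table entry per chunk in the
 * map list, enables the branch (BR) block for the client, triggers the
 * data mover and blocks until the transfer-done interrupt signals
 * dm_transfer_event. Callers are expected to serialize transfers.
 */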
/* Lock during transfers */
int ocmem_rdm_transfer(int id, struct ocmem_map_list *clist,
			unsigned long start, int direction)
{
	int num_chunks = clist->num_chunks;
	int slot = client_slot_start(id);
	int table_start = 0;
	int table_end = 0;
	int br_ctrl = 0;
	int br_id = 0;
	int client_id = 0;
	int dm_ctrl = 0;
	int i = 0;
	int j = 0;
	int status = 0;
	int rc = 0;

	rc = ocmem_enable_core_clock();

	if (rc < 0) {
		pr_err("RDM transfer failed for client %s (id: %d)\n",
				get_name(id), id);
		return rc;
	}

	/* Clear DM Mask */
	ocmem_write(DM_MASK_RESET, dm_base + DM_INTR_MASK);
	/* Clear DM Interrupts */
	ocmem_write(DM_INTR_RESET, dm_base + DM_INTR_CLR);

	for (i = 0, j = slot; i < num_chunks; i++, j++) {
		struct ocmem_chunk *chunk = &clist->chunks[i];
		int sz = chunk->size;
		int paddr = chunk->ddr_paddr;
		int tbl_n_ctrl = 0;

		tbl_n_ctrl |= BR_TBL_ENTRY_ENABLE;
		if (chunk->ro)
			tbl_n_ctrl |= (1 << BR_RW_SHIFT);

		/* Table Entry n of BR and DM */
		ocmem_write(start, br_base + BR_TBL_n_offset(j));
		ocmem_write(sz, br_base + BR_TBL_n_size(j));
		ocmem_write(paddr, br_base + BR_TBL_n_paddr(j));
		ocmem_write(tbl_n_ctrl, br_base + BR_TBL_n_ctrl(j));

		ocmem_write(start, dm_base + DM_TBL_n_offset(j));
		ocmem_write(sz, dm_base + DM_TBL_n_size(j));
		ocmem_write(paddr, dm_base + DM_TBL_n_paddr(j));
		ocmem_write(tbl_n_ctrl, dm_base + DM_TBL_n_ctrl(j));

		start += sz;
	}

	br_id = client_ctrl_id(id);
	table_start = slot;
	table_end = slot + num_chunks - 1;

	br_ctrl |= (table_start << BR_TBL_START);
	br_ctrl |= (table_end << BR_TBL_END);

	ocmem_write(br_ctrl, (br_base + BR_CLIENT_n_ctrl(br_id)));
	/* Enable BR */
	ocmem_write(0x1, br_base + BR_CTRL);

	/* Compute DM Control Value */
	dm_ctrl |= (table_start << DM_TBL_START);
	dm_ctrl |= (table_end << DM_TBL_END);

	client_id = client_ctrl_id(id);
	dm_ctrl |= (client_id << DM_CLIENT_SHIFT);
	dm_ctrl |= (DM_BR_ID_LPASS << DM_BR_ID_SHIFT);
	dm_ctrl |= (DM_BLOCK_256 << DM_BR_BLK_SHIFT);
	dm_ctrl |= (direction << DM_DIR_SHIFT);

	status = ocmem_read(dm_base + DM_GEN_STATUS);
	pr_debug("Transfer status before %x\n", status);

	INIT_COMPLETION(dm_transfer_event);

	/* The DM and BR tables must be programmed before triggering the
	 * Data Mover else the coherent transfer would be corrupted
	 */
	mb();

	/* Trigger DM */
	ocmem_write(dm_ctrl, dm_base + DM_CTRL);
	pr_debug("ocmem: rdm: dm_ctrl %x br_ctrl %x\n", dm_ctrl, br_ctrl);

	wait_for_completion(&dm_transfer_event);
	pr_debug("Completed transferring %d segments\n", num_chunks);
	ocmem_disable_core_clock();
	return 0;
}
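/*
 * ocmem_core_init() validates OC_HW_PROFILE against the expected OCMEM V1
 * topology (region count, macro count, interleaving), allocates the
 * per-region and per-macro bookkeeping, optionally creates RPM requests
 * when RPM power control is enabled, and leaves every region in its
 * default power state with the core clock disabled.
 */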
int ocmem_core_init(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ocmem_plat_data *pdata = NULL;
	unsigned hw_ver;
	bool interleaved;
	unsigned i, j, k;
	unsigned rsc_type = 0;
	int rc = 0;

	pdata = platform_get_drvdata(pdev);
	ocmem_base = pdata->reg_base;

	rc = ocmem_enable_core_clock();

	if (rc < 0)
		return rc;

	hw_ver = ocmem_read(ocmem_base + OC_HW_PROFILE);

	if (pdata->nr_regions != OCMEM_V1_REGIONS) {
		pr_err("Invalid number of regions (%d)\n", pdata->nr_regions);
		goto hw_not_supported;
	}

	num_macros = (hw_ver & NUM_MACROS_MASK) >> NUM_MACROS_SHIFT;
	num_ports = (hw_ver & NUM_PORTS_MASK) >> NUM_PORTS_SHIFT;

	if (num_macros != OCMEM_V1_MACROS) {
		pr_err("Invalid number of macros (%d)\n", num_macros);
		goto hw_not_supported;
	}

	interleaved = (hw_ver & INTERLEAVING_MASK) >> INTERLEAVING_SHIFT;

	if (interleaved == false) {
		pr_err("Interleaving is disabled\n");
		goto hw_not_supported;
	}

	num_regions = pdata->nr_regions;

	pdata->interleaved = true;
	pdata->nr_macros = num_macros;
	pdata->nr_ports = num_ports;
	macro_size = OCMEM_V1_MACRO_SZ * 2;
	num_banks = num_ports / 2;
	region_size = macro_size * num_banks;
	rsc_type = pdata->rpm_rsc_type;
	pr_debug("ocmem_core: ports %d regions %d macros %d interleaved %d\n",
			num_ports, num_regions, num_macros, interleaved);

	region_ctrl = devm_kzalloc(dev,
			sizeof(struct ocmem_hw_region) * num_regions,
			GFP_KERNEL);

	if (!region_ctrl)
		goto err_no_mem;

	mutex_init(&region_ctrl_lock);

	for (i = 0; i < num_regions; i++) {
		struct ocmem_hw_region *region = &region_ctrl[i];
		struct msm_rpm_request *req = NULL;

		region->interleaved = interleaved;
		region->mode = MODE_DEFAULT;
		region->r_state = REGION_DEFAULT_OFF;
		region->num_macros = num_banks;

		region->macro = devm_kzalloc(dev,
				sizeof(struct ocmem_hw_macro) * num_banks,
				GFP_KERNEL);

		if (!region->macro)
			goto err_no_mem;

		for (j = 0; j < num_banks; j++) {
			struct ocmem_hw_macro *m = &region->macro[j];

			m->m_state = MACRO_OFF;
			for (k = 0; k < OCMEM_CLIENT_MAX; k++) {
				atomic_set(&m->m_on[k], 0);
				atomic_set(&m->m_retain[k], 0);
			}
		}

		if (pdata->rpm_pwr_ctrl) {
			rpm_power_control = true;
			req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET,
					rsc_type, i, num_banks);
			if (!req) {
				pr_err("Unable to create RPM request\n");
				goto region_init_error;
			}
			pr_debug("rpm request type %x (rsc: %d) with %d elements\n",
					rsc_type, i, num_banks);
			region->rpm_req = req;
		}

		if (ocmem_region_toggle(i)) {
			pr_err("Failed to verify region %d\n", i);
			goto region_init_error;
		}

		if (ocmem_region_set_default_state(i)) {
			pr_err("Failed to initialize region %d\n", i);
			goto region_init_error;
		}
	}

	rc = ocmem_core_set_default_state();

	if (rc < 0) {
		/* Do not leave the core clock enabled on failure */
		ocmem_disable_core_clock();
		return rc;
	}

	ocmem_disable_core_clock();
	return 0;

err_no_mem:
	pr_err("ocmem: Unable to allocate memory\n");
region_init_error:
hw_not_supported:
	pr_err("Unsupported OCMEM h/w configuration %x\n", hw_ver);
	ocmem_disable_core_clock();
	return -EINVAL;
}