/*
 * IRQ handler for the OCMEM data mover (dm_wq/dm_pending variant).
 *
 * Marks the outstanding DM operation finished, acks the DM interrupt,
 * and wakes the thread sleeping on dm_wq in ocmem_rdm_transfer().
 *
 * NOTE(review): DM_INTR_DISABLE is written into the interrupt *clear*
 * register (DM_INTR_CLR) — presumably the disable mask doubles as the
 * ack mask for all DM sources; confirm against the register spec.
 */
static irqreturn_t ocmem_dm_irq_handler(int irq, void *dev_id)
{
	/* Mark the transfer done before waking the waiter. */
	atomic_set(&dm_pending, 0);
	/* Ack/clear the pending DM interrupt. */
	ocmem_write(DM_INTR_DISABLE, dm_base + DM_INTR_CLR);
	wake_up_interruptible(&dm_wq);
	return IRQ_HANDLED;
}
static irqreturn_t ocmem_dm_irq_handler(int irq, void *dev_id) { unsigned status; unsigned irq_status; status = ocmem_read(dm_base + DM_GEN_STATUS); irq_status = ocmem_read(dm_base + DM_INT_STATUS); pr_debug("irq:dm_status %x irq_status %x\n", status, irq_status); if (irq_status & BIT(0)) { pr_debug("Data mover completed\n"); ocmem_write(BIT(0), dm_base + DM_INTR_CLR); pr_debug("Last re-mapped address block %x\n", ocmem_read(br_base + BR_LAST_ADDR)); complete(&dm_transfer_event); } else if (irq_status & BIT(1)) { pr_debug("Data clear engine completed\n"); ocmem_write(BIT(1), dm_base + DM_INTR_CLR); complete(&dm_clear_event); } else { BUG_ON(1); } return IRQ_HANDLED; }
/*
 * Commit the cached power state of an OCMEM region to hardware.
 * Must be called with the region mutex held.
 *
 * @region_num: index into region_ctrl[]; validated against num_regions.
 *
 * The new state is taken from the software region table and written
 * either via the RPM (when rpm_power_control is set) or directly to the
 * region's PSCGC control register.
 *
 * Returns 0 on success, -EINVAL for an out-of-range region, or the
 * negative error code from rpm_write().
 *
 * Fix: the result of rpm_write() was assigned to rc but the function
 * unconditionally returned 0, silently swallowing RPM failures; the
 * error is now propagated.
 */
static int commit_region_state(unsigned region_num)
{
	int rc;
	unsigned new_state;

	if (region_num >= num_regions)
		return -EINVAL;

	new_state = region_ctrl[region_num].r_state;
	pr_debug("ocmem: commit region (%d) new state %x\n",
			region_num, new_state);

	if (rpm_power_control) {
		rc = rpm_write(new_state, region_num);
		if (rc < 0)
			return rc;
	} else {
		ocmem_write(new_state, ocmem_base + PSCGC_CTL_n(region_num));
	}
	return 0;
}
int ocmem_rdm_init(struct platform_device *pdev) { struct ocmem_plat_data *pdata = NULL; int rc = 0; pdata = platform_get_drvdata(pdev); br_base = pdata->br_base; dm_base = pdata->dm_base; rc = devm_request_irq(&pdev->dev, pdata->dm_irq, ocmem_dm_irq_handler, IRQF_TRIGGER_RISING, "ocmem_dm_irq", pdata); if (rc) { dev_err(&pdev->dev, "Failed to request dm irq"); return -EINVAL; } init_waitqueue_head(&dm_wq); /* enable dm interrupts */ ocmem_write(DM_INTR_ENABLE, dm_base + DM_INTR_MASK); return 0; }
/*
 * Fill an OCMEM range with a fixed pattern using the DM clear engine.
 *
 * @start: OCMEM offset of the range to clear.
 * @size:  length of the range.
 *
 * Programs the clear engine registers, triggers it, and sleeps until
 * the clear-complete interrupt signals dm_clear_event.  The register
 * writes below must happen in this order: parameters are programmed
 * before the trigger, with mb() ensuring they have landed first.
 *
 * Always returns 0; the wait is uninterruptible and has no timeout.
 */
int ocmem_clear(unsigned long start, unsigned long size)
{
	INIT_COMPLETION(dm_clear_event);
	/* Unmask and ack any stale DM interrupts before starting. */
	ocmem_write(DM_MASK_RESET, dm_base + DM_INTR_MASK);
	ocmem_write(DM_INTR_RESET, dm_base + DM_INTR_CLR);
	/* Program the range to clear. */
	ocmem_write(start, dm_base + DM_CLR_OFFSET);
	ocmem_write(size, dm_base + DM_CLR_SIZE);
	/* Fill pattern 0x4D4D434F is ASCII "OCMM" (little-endian). */
	ocmem_write(0x4D4D434F, dm_base + DM_CLR_PATTERN);
	/* Ensure all parameters are visible before the trigger write. */
	mb();
	ocmem_write(DM_CLR_ENABLE, dm_base + DM_CLR_TRIGGER);
	wait_for_completion(&dm_clear_event);
	return 0;
}
/*
 * Move a client's memory between DDR and OCMEM using the hardware data
 * mover (DM) and bus remapper (BR).
 *
 * @id:        OCMEM client id; selects its BR/DM table slot range.
 * @clist:     list of chunks (size, DDR physical address) to transfer.
 * @start:     OCMEM offset of the first chunk; advanced per chunk.
 * @direction: transfer direction field for the DM control register.
 *
 * One BR table entry and one mirrored DM table entry are programmed per
 * chunk, the BR is enabled for the client's slot range, and the DM is
 * triggered; the caller then sleeps until the completion IRQ fires.
 *
 * Returns 0 on success, or a negative error if the core clock could
 * not be enabled.
 */
/* Lock during transfers */
int ocmem_rdm_transfer(int id, struct ocmem_map_list *clist,
			unsigned long start, int direction)
{
	int num_chunks = clist->num_chunks;
	int slot = client_slot_start(id);
	int table_start = 0;
	int table_end = 0;
	int br_ctrl = 0;
	int br_id = 0;
	int client_id = 0;
	int dm_ctrl = 0;
	int i = 0;
	int j = 0;
	int status = 0;
	int rc = 0;

	rc = ocmem_enable_core_clock();
	if (rc < 0) {
		pr_err("RDM transfer failed for client %s (id: %d)\n",
				get_name(id), id);
		return rc;
	}

	/* Clear DM Mask */
	ocmem_write(DM_MASK_RESET, dm_base + DM_INTR_MASK);
	/* Clear DM Interrupts */
	ocmem_write(DM_INTR_RESET, dm_base + DM_INTR_CLR);

	/* Program one BR entry and one mirrored DM entry per chunk. */
	for (i = 0, j = slot; i < num_chunks; i++, j++) {
		struct ocmem_chunk *chunk = &clist->chunks[i];
		int sz = chunk->size;
		int paddr = chunk->ddr_paddr;
		int tbl_n_ctrl = 0;

		tbl_n_ctrl |= BR_TBL_ENTRY_ENABLE;
		/* Read-only chunks get the RW bit set in the entry. */
		if (chunk->ro)
			tbl_n_ctrl |= (1 << BR_RW_SHIFT);

		/* Table Entry n of BR and DM */
		ocmem_write(start, br_base + BR_TBL_n_offset(j));
		ocmem_write(sz, br_base + BR_TBL_n_size(j));
		ocmem_write(paddr, br_base + BR_TBL_n_paddr(j));
		ocmem_write(tbl_n_ctrl, br_base + BR_TBL_n_ctrl(j));

		ocmem_write(start, dm_base + DM_TBL_n_offset(j));
		ocmem_write(sz, dm_base + DM_TBL_n_size(j));
		ocmem_write(paddr, dm_base + DM_TBL_n_paddr(j));
		ocmem_write(tbl_n_ctrl, dm_base + DM_TBL_n_ctrl(j));

		/* Chunks are laid out back-to-back in OCMEM. */
		start += sz;
	}

	br_id = client_ctrl_id(id);
	table_start = slot;
	table_end = slot + num_chunks - 1;
	br_ctrl |= (table_start << BR_TBL_START);
	br_ctrl |= (table_end << BR_TBL_END);

	ocmem_write(br_ctrl, (br_base + BR_CLIENT_n_ctrl(br_id)));
	/* Enable BR */
	ocmem_write(0x1, br_base + BR_CTRL);

	/* Compute DM Control Value */
	dm_ctrl |= (table_start << DM_TBL_START);
	dm_ctrl |= (table_end << DM_TBL_END);

	client_id = client_ctrl_id(id);
	dm_ctrl |= (client_id << DM_CLIENT_SHIFT);
	dm_ctrl |= (DM_BR_ID_LPASS << DM_BR_ID_SHIFT);
	dm_ctrl |= (DM_BLOCK_256 << DM_BR_BLK_SHIFT);
	dm_ctrl |= (direction << DM_DIR_SHIFT);

	status = ocmem_read(dm_base + DM_GEN_STATUS);
	pr_debug("Transfer status before %x\n", status);
	INIT_COMPLETION(dm_transfer_event);
	/* The DM and BR tables must be programmed before triggering the
	 * Data Mover else the coherent transfer would be corrupted
	 */
	mb();
	/* Trigger DM */
	ocmem_write(dm_ctrl, dm_base + DM_CTRL);
	pr_debug("ocmem: rdm: dm_ctrl %x br_ctrl %x\n", dm_ctrl, br_ctrl);
	wait_for_completion(&dm_transfer_event);
	pr_debug("Completed transferring %d segments\n", num_chunks);
	ocmem_disable_core_clock();
	return 0;
}
/* Lock during transfers */ int ocmem_rdm_transfer(int id, struct ocmem_map_list *clist, unsigned long start, int direction) { int num_chunks = clist->num_chunks; int slot = client_slot_start(id); int table_start = 0; int table_end = 0; int br_ctrl = 0; int br_id = 0; int dm_ctrl = 0; int i = 0; int j = 0; int status = 0; for (i = 0, j = slot; i < num_chunks; i++, j++) { struct ocmem_chunk *chunk = &clist->chunks[i]; int sz = chunk->size; int paddr = chunk->ddr_paddr; int tbl_n_ctrl = 0; tbl_n_ctrl |= BR_TBL_ENTRY_ENABLE; if (chunk->ro) tbl_n_ctrl |= (1 << BR_RW_SHIFT); /* Table Entry n of BR and DM */ ocmem_write(start, br_base + BR_TBL_n_offset(j)); ocmem_write(sz, br_base + BR_TBL_n_size(j)); ocmem_write(paddr, br_base + BR_TBL_n_paddr(j)); ocmem_write(tbl_n_ctrl, br_base + BR_TBL_n_ctrl(j)); ocmem_write(start, dm_base + DM_TBL_n_offset(j)); ocmem_write(sz, dm_base + DM_TBL_n_size(j)); ocmem_write(paddr, dm_base + DM_TBL_n_paddr(j)); ocmem_write(tbl_n_ctrl, dm_base + DM_TBL_n_ctrl(j)); start += sz; } br_id = client_ctrl_id(id); table_start = slot; table_end = slot + num_chunks - 1; br_ctrl |= (table_start << BR_TBL_START); br_ctrl |= (table_end << BR_TBL_END); ocmem_write(br_ctrl, (br_base + BR_CLIENT_n_ctrl(br_id))); /* Enable BR */ ocmem_write(0x1, br_base + BR_CTRL); /* Compute DM Control Value */ dm_ctrl |= (table_start << DM_TBL_START); dm_ctrl |= (table_end << DM_TBL_END); dm_ctrl |= (DM_BR_ID_LPASS << DM_BR_ID_SHIFT); dm_ctrl |= (DM_BLOCK_256 << DM_BR_BLK_SHIFT); dm_ctrl |= (direction << DM_DIR_SHIFT); status = ocmem_read(dm_base + DM_STATUS); pr_debug("Transfer status before %x\n", status); atomic_set(&dm_pending, 1); /* Trigger DM */ ocmem_write(dm_ctrl, dm_base + DM_CTRL); pr_debug("ocmem: rdm: dm_ctrl %x br_ctrl %x\n", dm_ctrl, br_ctrl); wait_event_interruptible(dm_wq, atomic_read(&dm_pending) == 0); return 0; }