/*
 * stm_send() - emit up to 8 bytes of trace data to an STM channel address
 * @addr:        MMIO address of the channel
 * @data:        payload to send
 * @size:        number of bytes to emit (1, 2, 4 or, on 64-bit, 8)
 * @write_bytes: alignment granule of the target window
 *
 * If @data is not suitably aligned for the access width, it is first
 * staged in an aligned on-stack buffer so the MMIO access itself is a
 * single naturally-aligned relaxed write of @size bytes.
 */
static void stm_send(void __iomem *addr, const void *data, u32 size,
		     u8 write_bytes)
{
	u8 payload[8];

	/* Stage unaligned source data in an aligned scratch buffer. */
	if (stm_addr_unaligned(data, write_bytes)) {
		memcpy(payload, data, size);
		data = payload;
	}

	/* data is now 64-bit/32-bit aligned */
	switch (size) {
	case 1:
		writeb_relaxed(*(u8 *)data, addr);
		break;
	case 2:
		writew_relaxed(*(u16 *)data, addr);
		break;
	case 4:
		writel_relaxed(*(u32 *)data, addr);
		break;
#ifdef CONFIG_64BIT
	case 8:
		writeq_relaxed(*(u64 *)data, addr);
		break;
#endif
	default:
		break;
	}
}
/*
 * cpc_write() - write @val to the CPC register described by @reg.
 *
 * PCC (Platform Communication Channel) registers are written through
 * the memory-mapped PCC shared region with relaxed MMIO accessors;
 * every other address space is handed to acpi_os_write_memory().
 *
 * Return: 0 on success, -EFAULT for an unsupported PCC access width,
 * or the status of acpi_os_write_memory().
 */
static int cpc_write(struct cpc_reg *reg, u64 val)
{
	void __iomem *vaddr;

	if (reg->space_id != ACPI_ADR_SPACE_PLATFORM_COMM)
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, reg->bit_width);

	vaddr = GET_PCC_VADDR(reg->address);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC\n",
			 reg->bit_width);
		return -EFAULT;
	}

	return 0;
}
/*
 * xcv_setup_link() - program the XCV block for a PHY link state change
 * @link_up:    true when the link has come up, false when it went down
 * @link_speed: negotiated link speed in Mbps (10, 100, otherwise gigabit)
 */
void xcv_setup_link(bool link_up, int link_speed)
{
	u64 cfg;
	int speed = 2;	/* default speed encoding: gigabit */

	if (!xcv) {
		/*
		 * Bug fix: the original called dev_err(&xcv->pdev->dev, ...)
		 * here, dereferencing the very pointer it just found to be
		 * NULL. Use pr_err() so the failure is reported without a
		 * NULL pointer dereference.
		 */
		pr_err("XCV init not done, probe may have failed\n");
		return;
	}

	if (link_speed == 100)
		speed = 1;
	else if (link_speed == 10)
		speed = 0;

	if (link_up) {
		/* set operating speed */
		cfg = readq_relaxed(xcv->reg_base + XCV_CTL);
		cfg &= ~0x03;
		cfg |= speed;
		writeq_relaxed(cfg, xcv->reg_base + XCV_CTL);

		/* Reset datapaths */
		cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
		cfg |= TX_DATA_RESET | RX_DATA_RESET;
		writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);

		/* Enable the packet flow */
		cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
		cfg |= TX_PKT_RESET | RX_PKT_RESET;
		writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);

		/* Return credits to RGX */
		writeq_relaxed(0x01, xcv->reg_base + XCV_BATCH_CRD_RET);
	} else {
		/* Disable packet flow */
		cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
		cfg &= ~(TX_PKT_RESET | RX_PKT_RESET);
		writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
		/* Read back to push the posted write out to the HW. */
		readq_relaxed(xcv->reg_base + XCV_RESET);
	}
}
/* Send a mailbox message to VF * @vf: vf to which this message to be sent * @mbx: Message to be sent */ static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx) { void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf); u64 *msg = (u64 *)mbx; /* In first revision HW, mbox interrupt is triggerred * when PF writes to MBOX(1), in next revisions when * PF writes to MBOX(0) */ if (pass1_silicon(nic)) { /* see the comment for nic_reg_write()/nic_reg_read() * functions above */ writeq_relaxed(msg[0], mbx_addr); writeq_relaxed(msg[1], mbx_addr + 8); } else { writeq_relaxed(msg[1], mbx_addr + 8); writeq_relaxed(msg[0], mbx_addr); } }
static int acpi_parking_protocol_cpu_boot(unsigned int cpu) { struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu]; struct parking_protocol_mailbox __iomem *mailbox; __le32 cpu_id; /* * Map mailbox memory with attribute device nGnRE (ie ioremap - * this deviates from the parking protocol specifications since * the mailboxes are required to be mapped nGnRnE; the attribute * discrepancy is harmless insofar as the protocol specification * is concerned). * If the mailbox is mistakenly allocated in the linear mapping * by FW ioremap will fail since the mapping will be prevented * by the kernel (it clashes with the linear mapping attributes * specifications). */ mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox)); if (!mailbox) return -EIO; cpu_id = readl_relaxed(&mailbox->cpu_id); /* * Check if firmware has set-up the mailbox entry properly * before kickstarting the respective cpu. */ if (cpu_id != ~0U) { iounmap(mailbox); return -ENXIO; } /* * stash the mailbox address mapping to use it for further FW * checks in the postboot method */ cpu_entry->mailbox = mailbox; /* * We write the entry point and cpu id as LE regardless of the * native endianness of the kernel. Therefore, any boot-loaders * that read this address need to convert this address to the * Boot-Loader's endianness before jumping. */ writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point); writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id); arch_send_wakeup_ipi_mask(cpumask_of(cpu)); return 0; }
/*
 * cpc_write() - write @val to the CPC register described by @reg_res
 * @cpu:     CPU the register belongs to (selects the PCC subspace)
 * @reg_res: parsed _CPC register resource
 * @val:     value to write
 *
 * Dispatches on the register's ACPI address-space id:
 *  - PCC (with a valid subspace): relaxed MMIO into the PCC region
 *  - system memory:               relaxed MMIO via the pre-mapped vaddr
 *  - fixed hardware (FFH):        delegated to cpc_write_ffh()
 *  - anything else:               delegated to acpi_os_write_memory()
 *
 * Return: 0 on success, -EFAULT for an unsupported bit width, or the
 * status of the delegated writer.
 */
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;	/* fix: NULL, not 0, for a pointer */
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	/* fix: restored mis-encoded '&' (source had a garbled "reg" glyph) */
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC\n",
			 reg->bit_width);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}
/*
 * Publish the secondary boot entry point into a CPU's spin-table
 * release address and wake any spinning secondaries with SEV.
 *
 * Returns 0 on success, -ENODEV if no release address is known for
 * this cpu, or -ENOMEM if the release address cannot be mapped.
 */
static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
	__le64 __iomem *release_addr;

	/* No cpu-release-addr was discovered for this cpu. */
	if (!cpu_release_addr[cpu])
		return -ENODEV;

	/*
	 * The cpu-release-addr may or may not be inside the linear mapping.
	 * As ioremap_cache will either give us a new mapping or reuse the
	 * existing linear mapping, we can use it to cover both cases. In
	 * either case the memory will be MT_NORMAL.
	 */
	release_addr = ioremap_cache(cpu_release_addr[cpu],
				     sizeof(*release_addr));
	if (!release_addr)
		return -ENOMEM;

	/*
	 * We write the release address as LE regardless of the native
	 * endianness of the kernel. Therefore, any boot-loaders that
	 * read this address need to convert this address to the
	 * boot-loader's endianness before jumping. This is mandated by
	 * the boot protocol.
	 */
	writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
	/*
	 * NOTE(review): explicit dcache clean -- presumably so the spinning
	 * secondary observes the release value even if it is not yet
	 * cache-coherent; confirm against the arm64 boot protocol.
	 */
	__flush_dcache_area((__force void *)release_addr,
			    sizeof(*release_addr));

	/*
	 * Send an event to wake up the secondary CPU.
	 */
	sev();

	iounmap(release_addr);

	return 0;
}
/*
 * One-time hardware bring-up of the XCV block: release the DLL and
 * clock tree from reset, configure the DLL, enable the compensation
 * controller, and finally enable the XCV port. The register write
 * ordering and the msleep() settle delays are part of the sequence.
 */
void xcv_init_hw(void)
{
	u64 cfg;

	/* Take DLL out of reset */
	cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
	cfg &= ~DLL_RESET;
	writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);

	/* Take clock tree out of reset */
	cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
	cfg &= ~CLK_RESET;
	writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);

	/* Wait for DLL to lock */
	msleep(1);

	/* Configure DLL - enable or bypass
	 * TX no bypass, RX bypass
	 */
	cfg = readq_relaxed(xcv->reg_base + XCV_DLL_CTL);
	cfg &= ~0xFF03;
	cfg |= CLKRX_BYP;
	writeq_relaxed(cfg, xcv->reg_base + XCV_DLL_CTL);

	/* Enable compensation controller and force the
	 * write to be visible to HW by reading back.
	 */
	cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
	cfg |= COMP_EN;
	writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
	readq_relaxed(xcv->reg_base + XCV_RESET);

	/* Wait for compensation state machine to lock */
	msleep(10);

	/* enable the XCV block */
	cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
	cfg |= PORT_EN;
	writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);

	/*
	 * NOTE(review): CLK_RESET is set here after being cleared earlier
	 * in this function -- presumably intentional per the hardware
	 * manual; confirm against the ThunderX XCV documentation.
	 */
	cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
	cfg |= CLK_RESET;
	writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
}
/*
 * Program and enable an ETMv4 trace unit on the CPU this is invoked on
 * (called via smp cross-call with @info pointing at the drvdata).
 *
 * Sequence: unlock the CoreSight lock and the OS lock, disable the
 * trace unit, wait for it to go idle, program every configuration
 * register from the cached etmv4_config, then re-enable the unit and
 * wait for it to leave the idle state.
 */
static void etm4_enable_hw(void *info)
{
	int i;
	struct etmv4_drvdata *drvdata = info;
	struct etmv4_config *config = &drvdata->config;

	CS_UNLOCK(drvdata->base);

	etm4_os_unlock(drvdata);

	/* Disable the trace unit before programming trace registers */
	writel_relaxed(0, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go up */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
		dev_err(drvdata->dev,
			"timeout while waiting for Idle Trace Status\n");

	/* Main control and configuration registers */
	writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
	writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR);
	/* nothing specific implemented */
	writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
	writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R);
	writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R);
	writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR);
	writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR);
	writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR);
	writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR);
	writel_relaxed(config->bb_ctrl, drvdata->base + TRCBBCTLR);
	writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
	/* ViewInst filtering registers */
	writel_relaxed(config->vinst_ctrl, drvdata->base + TRCVICTLR);
	writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR);
	writel_relaxed(config->vissctlr, drvdata->base + TRCVISSCTLR);
	writel_relaxed(config->vipcssctlr, drvdata->base + TRCVIPCSSCTLR);
	/* Sequencer: state 0 has no event register, hence nrseqstate - 1 */
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		writel_relaxed(config->seq_ctrl[i],
			       drvdata->base + TRCSEQEVRn(i));
	writel_relaxed(config->seq_rst, drvdata->base + TRCSEQRSTEVR);
	writel_relaxed(config->seq_state, drvdata->base + TRCSEQSTR);
	writel_relaxed(config->ext_inp, drvdata->base + TRCEXTINSELR);
	/* Counters: reload value, control and current value per counter */
	for (i = 0; i < drvdata->nr_cntr; i++) {
		writel_relaxed(config->cntrldvr[i],
			       drvdata->base + TRCCNTRLDVRn(i));
		writel_relaxed(config->cntr_ctrl[i],
			       drvdata->base + TRCCNTCTLRn(i));
		writel_relaxed(config->cntr_val[i],
			       drvdata->base + TRCCNTVRn(i));
	}

	/* Resource selector pair 0 is always implemented and reserved */
	for (i = 0; i < drvdata->nr_resource * 2; i++)
		writel_relaxed(config->res_ctrl[i],
			       drvdata->base + TRCRSCTLRn(i));

	/* Single-shot comparator control/status/PE-comparator inputs */
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		writel_relaxed(config->ss_ctrl[i],
			       drvdata->base + TRCSSCCRn(i));
		writel_relaxed(config->ss_status[i],
			       drvdata->base + TRCSSCSRn(i));
		writel_relaxed(config->ss_pe_cmp[i],
			       drvdata->base + TRCSSPCICRn(i));
	}
	/* Address comparators: value and access-type are 64-bit registers */
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		writeq_relaxed(config->addr_val[i],
			       drvdata->base + TRCACVRn(i));
		writeq_relaxed(config->addr_acc[i],
			       drvdata->base + TRCACATRn(i));
	}
	/* Context ID comparators and masks */
	for (i = 0; i < drvdata->numcidc; i++)
		writeq_relaxed(config->ctxid_pid[i],
			       drvdata->base + TRCCIDCVRn(i));
	writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
	writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
	/* VMID comparators and masks */
	for (i = 0; i < drvdata->numvmidc; i++)
		writeq_relaxed(config->vmid_val[i],
			       drvdata->base + TRCVMIDCVRn(i));
	writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
	writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);

	/*
	 * Request to keep the trace unit powered and also
	 * emulation of powerdown
	 */
	writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR) | TRCPDCR_PU,
		       drvdata->base + TRCPDCR);

	/* Enable the trace unit */
	writel_relaxed(1, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go back down to '0' */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
		dev_err(drvdata->dev,
			"timeout while waiting for Idle Trace Status\n");

	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}
/* Register read/write APIs */

/*
 * nic_reg_write() - write a 64-bit value to a PF register.
 * @nic:    PF driver context providing the mapped register base
 * @offset: byte offset of the register from the base
 * @val:    value to write
 */
static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
{
	void __iomem *addr = nic->reg_base + offset;

	writeq_relaxed(val, addr);
}