/*
 * hb_voltage_change - ask the M3 management core to change the CPU voltage
 * @freq: target frequency in Hz; only the MHz value (freq / 1000000) is
 *        passed to the firmware.
 *
 * Builds a two-word IPC message {HB_CPUFREQ_CHANGE_NOTE, MHz}, sends it
 * over the PL320 "A9_to_M3" mailbox channel in blocking mode (1 s timeout),
 * and returns the firmware's reply word.
 *
 * Return: msg[1] as updated by the PL320 FIFO after the ACK (presumably a
 * status/errno from the M3 firmware — verify against the firmware protocol),
 * -ETIMEDOUT if the send did not complete, or a PTR_ERR errno if the
 * channel could not be requested.
 */
static int hb_voltage_change(unsigned int freq)
{
	/* msg[0] = command, msg[1] = requested frequency in MHz */
	u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};
	struct mbox_client cl;
	int ret = -ETIMEDOUT;
	struct mbox_chan *chan;

	/* Blocking, ACK-driven transfer: no callbacks, framework handles txdone */
	cl.rx_callback = NULL;
	cl.tx_done = NULL;
	cl.tx_block = true;
	cl.tx_tout = 1000; /* 1 sec */
	cl.link_data = NULL;
	cl.knows_txdone = false;
	cl.chan_name = "pl320:A9_to_M3";

	/* NOTE(review): legacy single-argument mbox_request_channel() — the
	 * channel is selected by cl.chan_name rather than an index. */
	chan = mbox_request_channel(&cl);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* NOTE(review): this treats a *non-zero* mbox_send_message() return as
	 * success; that matches an API era where it returned a positive token.
	 * Confirm against the mailbox framework version in this tree. */
	if (mbox_send_message(chan, (void *)msg))
		ret = msg[1]; /* PL320 updates buffer with FIFO after ACK */

	mbox_free_channel(chan);

	return ret;
}
/*
 * cmdq_mbox_create - create a CMDQ mailbox client and request its channel
 * @dev:     device of the CMDQ mailbox consumer
 * @index:   index of the mailbox channel to request
 * @timeout: packet timeout in ms, or CMDQ_NO_TIMEOUT to disable the
 *           per-client timeout timer
 *
 * Return: a newly allocated client on success, or an ERR_PTR() errno on
 * failure (-ENOMEM, or the error from mbox_request_channel()).
 * The caller owns the returned client and must release it (channel + memory).
 */
struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		/* was: (struct cmdq_client *)-ENOMEM — use ERR_PTR() so the
		 * encoding is type-correct and consistent with the channel
		 * error path below; callers check with IS_ERR()/PTR_ERR(). */
		return ERR_PTR(-ENOMEM);

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		/* Timer is only set up, never armed here, so the error path
		 * below may still plain kfree() the client. */
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->pkt_cnt = 0;
	client->client.dev = dev;
	client->client.tx_block = false;
	client->chan = mbox_request_channel(&client->client, index);
	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
/*
 * send_scpi_cmd - send one SCPI command and wait for the SCP's reply
 * @scpi_buf:      transfer descriptor; its ->data is handed to the mailbox
 *                 and its ->complete is signalled by scpi_rx_callback
 * @high_priority: selects the high- or low-priority mailbox channel
 *
 * Return: a Linux errno derived (via scpi_to_linux_errno()) from the first
 * status word of the reply, or SCPI_ERR_TIMEOUT mapped likewise if the send
 * failed; a PTR_ERR errno if no channel could be obtained.
 */
static int send_scpi_cmd(struct scpi_data_buf *scpi_buf, bool high_priority)
{
	struct mbox_client cl = {0};
	struct mhu_data_buf *data = scpi_buf->data;
	struct mbox_chan *chan;
	u32 status = SCPI_ERR_TIMEOUT;	/* assume failure until a reply lands */

	cl.dev = the_scpi_device;
	cl.rx_callback = scpi_rx_callback;
	chan = mbox_request_channel(&cl, high_priority);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Completion must be armed before the message goes out, since the rx
	 * callback may fire as soon as the SCP answers. */
	init_completion(&scpi_buf->complete);
	if (mbox_send_message(chan, (void *)data) >= 0) {
		wait_for_completion(&scpi_buf->complete);
		status = *(u32 *)(data->rx_buf); /* read first word */
	}

	mbox_free_channel(chan);

	return scpi_to_linux_errno(status);
}
static int wkup_m3_rproc_start(struct rproc *rproc) { struct wkup_m3_rproc *m3_rproc = rproc->priv; struct platform_device *pdev = m3_rproc->pdev; struct device *dev = &pdev->dev; struct wkup_m3_platform_data *pdata = dev->platform_data; int ret; wkup_m3_fw_version_clear(); ret = pdata->deassert_reset(pdev, pdata->reset_name); if (ret) { dev_err(dev, "Unable to reset wkup_m3!\n"); return -ENODEV; } m3_rproc->mbox_client.dev = dev; m3_rproc->mbox_client.tx_done = NULL; m3_rproc->mbox_client.rx_callback = NULL; m3_rproc->mbox_client.tx_block = false; m3_rproc->mbox_client.knows_txdone = false; m3_rproc->mbox = mbox_request_channel(&m3_rproc->mbox_client, 0); if (IS_ERR(m3_rproc->mbox)) { dev_err(dev, "IPC Request for A8->M3 Channel failed!\n"); ret = PTR_ERR(m3_rproc->mbox); m3_rproc->mbox = NULL; return ret; } if (wkup_m3_pm_ops && wkup_m3_pm_ops->rproc_ready && !m3_rproc_static->is_rtc_only) wkup_m3_pm_ops->rproc_ready(&m3_rproc_static->pdev->dev); m3_rproc_static->is_active = 1; return 0; }
static int scpi_probe(struct platform_device *pdev) { int count, idx, ret; struct resource res; struct scpi_chan *scpi_chan; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL); if (!scpi_info) { dev_err(dev, "failed to allocate memory for scpi drvinfo\n"); return -ENOMEM; } count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); if (count < 0) { dev_err(dev, "no mboxes property in '%s'\n", np->full_name); return -ENODEV; } scpi_chan = devm_kcalloc(dev, count, sizeof(*scpi_chan), GFP_KERNEL); if (!scpi_chan) { dev_err(dev, "failed to allocate memory scpi chaninfo\n"); return -ENOMEM; } for (idx = 0; idx < count; idx++) { resource_size_t size; struct scpi_chan *pchan = scpi_chan + idx; struct mbox_client *cl = &pchan->cl; struct device_node *shmem = of_parse_phandle(np, "shmem", idx); if (of_address_to_resource(shmem, 0, &res)) { dev_err(dev, "failed to get SCPI payload mem resource\n"); ret = -EINVAL; goto err; } size = resource_size(&res); pchan->rx_payload = devm_ioremap(dev, res.start, size); if (!pchan->rx_payload) { dev_err(dev, "failed to ioremap SCPI payload\n"); ret = -EADDRNOTAVAIL; goto err; } pchan->tx_payload = pchan->rx_payload + (size >> 1); cl->dev = dev; cl->rx_callback = scpi_handle_remote_msg; cl->tx_prepare = scpi_tx_prepare; cl->tx_block = true; cl->tx_tout = 50; cl->knows_txdone = false; /* controller can ack */ INIT_LIST_HEAD(&pchan->rx_pending); INIT_LIST_HEAD(&pchan->xfers_list); spin_lock_init(&pchan->rx_lock); mutex_init(&pchan->xfers_lock); ret = scpi_alloc_xfer_list(dev, pchan); if (!ret) { pchan->chan = mbox_request_channel(cl, idx); if (!IS_ERR(pchan->chan)) continue; ret = -EPROBE_DEFER; dev_err(dev, "failed to acquire channel#%d\n", idx); } err: scpi_free_channels(dev, scpi_chan, idx); scpi_info = NULL; return ret; } scpi_info->channels = scpi_chan; scpi_info->num_chans = count; platform_set_drvdata(pdev, scpi_info); ret = 
scpi_init_versions(scpi_info); if (ret) { dev_err(dev, "incorrect or no SCP firmware found\n"); scpi_remove(pdev); return ret; } _dev_info(dev, "SCP Protocol %d.%d Firmware %d.%d.%d version\n", PROTOCOL_REV_MAJOR(scpi_info->protocol_version), PROTOCOL_REV_MINOR(scpi_info->protocol_version), FW_REV_MAJOR(scpi_info->firmware_version), FW_REV_MINOR(scpi_info->firmware_version), FW_REV_PATCH(scpi_info->firmware_version)); scpi_info->scpi_ops = &scpi_ops; ret = sysfs_create_groups(&dev->kobj, versions_groups); if (ret) dev_err(dev, "unable to create sysfs version group\n"); return of_platform_populate(dev->of_node, NULL, NULL, dev); }
static int hi6220_stub_clk_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct clk_init_data init; struct hi6220_stub_clk *stub_clk; struct clk *clk; struct device_node *np = pdev->dev.of_node; int ret; stub_clk = devm_kzalloc(dev, sizeof(*stub_clk), GFP_KERNEL); if (!stub_clk) return -ENOMEM; stub_clk->dfs_map = syscon_regmap_lookup_by_phandle(np, "hisilicon,hi6220-clk-sram"); if (IS_ERR(stub_clk->dfs_map)) { dev_err(dev, "failed to get sram regmap\n"); return PTR_ERR(stub_clk->dfs_map); } stub_clk->hw.init = &init; stub_clk->dev = dev; stub_clk->id = HI6220_STUB_ACPU0; /* Use mailbox client with blocking mode */ stub_clk->cl.dev = dev; stub_clk->cl.tx_done = NULL; stub_clk->cl.tx_block = true; stub_clk->cl.tx_tout = 500; stub_clk->cl.knows_txdone = false; /* Allocate mailbox channel */ stub_clk->mbox = mbox_request_channel(&stub_clk->cl, 0); if (IS_ERR(stub_clk->mbox)) { dev_err(dev, "failed get mailbox channel\n"); return PTR_ERR(stub_clk->mbox); }; init.name = "acpu0"; init.ops = &hi6220_stub_clk_ops; init.num_parents = 0; init.flags = CLK_IS_ROOT; clk = devm_clk_register(dev, &stub_clk->hw); if (IS_ERR(clk)) return PTR_ERR(clk); ret = of_clk_add_provider(np, of_clk_src_simple_get, clk); if (ret) { dev_err(dev, "failed to register OF clock provider\n"); return ret; } /* initialize buffer to zero */ regmap_write(stub_clk->dfs_map, ACPU_DFS_FLAG, 0x0); regmap_write(stub_clk->dfs_map, ACPU_DFS_FREQ_REQ, 0x0); regmap_write(stub_clk->dfs_map, ACPU_DFS_FREQ_LMT, 0x0); dev_dbg(dev, "Registered clock '%s'\n", init.name); return 0; }