/*
 * hsi_controller_exit - release all controller-level resources.
 *
 * Tears down the GDD (DMA) engine and every port, detaches the optional
 * clock-rate-change notifier, then drops the reference on the HSI
 * functional clock taken at init time.
 */
static void hsi_controller_exit(struct hsi_dev *hsi_ctrl)
{
	struct hsi_platform_data *plat = hsi_ctrl->dev->platform_data;
	struct clk *clk = hsi_ctrl->hsi_clk;

	hsi_gdd_exit(hsi_ctrl);
	hsi_ports_exit(hsi_ctrl, hsi_ctrl->max_p);

	/* The notifier hook is optional platform glue; skip when absent. */
	if (plat->clk_notifier_unregister)
		plat->clk_notifier_unregister(clk, &hsi_ctrl->hsi_nb);

	clk_put(clk);
}
/*
 * hsi_ports_init - bring up every port of the controller.
 *
 * For each port: reset the software state, take the per-port settings
 * from the platform context, then initialize the channels and request
 * the MPU and CAWAKE interrupts.
 *
 * Returns 0 on success or a negative errno; on failure the ports set up
 * so far (including the one that failed) are torn down via
 * hsi_ports_exit().
 */
static int __init hsi_ports_init(struct hsi_dev *hsi_ctrl)
{
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
	struct hsi_platform_data *pdata = dev_get_platdata(hsi_ctrl->dev);
	unsigned int i;
	int err;

	for (i = 0; i < hsi_ctrl->max_p; i++) {
		struct hsi_port *p = &hsi_ctrl->hsi_port[i];

		p->flags = 0;
		p->port_number = pdata->ctx->pctx[i].port_number;
		p->hsi_controller = hsi_ctrl;
		p->max_ch = hsi_driver_device_is_hsi(pdev) ?
			    HSI_CHANNELS_MAX : HSI_SSI_CHANNELS_MAX;
		p->irq = 0;
		p->wake_rx_3_wires_mode = 0;	/* 4-wire mode */
		p->wake_3_wires_mode = 0;	/* 4-wire mode */
		p->hsi_mode = 0x00;		/* HSI mode, bits 0..31 */
		p->cawake_status = -1;		/* unknown at init time */
		p->cawake_off_event = false;
		p->cawake_double_int = false;
		p->acwake_status = 0;
		p->in_int_tasklet = false;
		p->in_cawake_tasklet = false;
		p->counters_on = 1;
		p->reg_counters = pdata->ctx->pctx[i].hsr.counters;
		spin_lock_init(&p->lock);

		err = hsi_port_channels_init(p);
		if (err < 0)
			goto rollback;
		err = hsi_request_mpu_irq(p);
		if (err < 0)
			goto rollback;
		err = hsi_request_cawake_irq(p);
		if (err < 0)
			goto rollback;

		dev_info(hsi_ctrl->dev, "HSI port %d initialized\n",
			 p->port_number);
	}
	return 0;

rollback:
	hsi_ports_exit(hsi_ctrl, i + 1);
	return err;
}
/*
 * hsi_ports_init - bring up every port of the controller.
 *
 * For each port: reset the software state, then initialize the channels
 * and request the MPU and CAWAKE interrupts.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the staged rollback looks suspect and should be
 * confirmed against hsi_ports_exit()/hsi_mpu_exit() semantics:
 *  - on a hsi_port_channels_init() failure (rback1) nothing is torn
 *    down, so ports initialized in earlier loop iterations appear to
 *    leak their IRQs/channels;
 *  - rback3 runs hsi_mpu_exit() for the current port and then falls
 *    through to hsi_ports_exit(port + 1), which may release the same
 *    port's MPU resources a second time.
 */
static int __init hsi_ports_init(struct hsi_dev *hsi_ctrl)
{
	struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
	struct hsi_platform_data *pdata = pd->dev.platform_data;
	struct hsi_port *hsi_p;
	unsigned int port;
	int err;

	for (port = 0; port < hsi_ctrl->max_p; port++) {
		hsi_p = &hsi_ctrl->hsi_port[port];
		hsi_p->port_number = port + 1;	/* port numbering is 1-based */
		hsi_p->hsi_controller = hsi_ctrl;
		/* Full HSI devices expose more channels than legacy SSI. */
		hsi_p->max_ch = hsi_driver_device_is_hsi(pd) ?
			HSI_CHANNELS_MAX : HSI_SSI_CHANNELS_MAX;
		hsi_p->irq = 0;
		hsi_p->cawake_status = -1;	/* Unknown */
		hsi_p->cawake_off_event = false;
		hsi_p->acwake_status = 0;
		hsi_p->in_int_tasklet = false;
		hsi_p->in_cawake_tasklet = false;
		hsi_p->counters_on = 1;
		/* Initial HSR counters come from the platform context. */
		hsi_p->reg_counters = pdata->ctx->pctx[port].hsr.counters;
		spin_lock_init(&hsi_p->lock);
		err = hsi_port_channels_init(&hsi_ctrl->hsi_port[port]);
		if (err < 0)
			goto rback1;
		err = hsi_request_mpu_irq(hsi_p);
		if (err < 0)
			goto rback2;
		err = hsi_request_cawake_irq(hsi_p);
		if (err < 0)
			goto rback3;
	}
	return 0;
rback3:
	hsi_mpu_exit(hsi_p);
rback2:
	hsi_ports_exit(hsi_ctrl, port + 1);
rback1:
	return err;
}
/*
 * hsi_ports_init - bring up every port of the controller.
 *
 * For each port: reset the software state (channel count clamped to
 * HSI_PORT_MAX_CH), then initialize the channels and request the MPU
 * and CAWAKE interrupts.
 *
 * Returns 0 on success or a negative errno, unwinding through the
 * staged error labels.
 */
static int __init hsi_ports_init(struct hsi_dev *hsi_ctrl)
{
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
	struct hsi_platform_data *pdata = pdev->dev.platform_data;
	struct hsi_port *p;
	unsigned int i;
	int err;

	for (i = 0; i < hsi_ctrl->max_p; i++) {
		p = &hsi_ctrl->hsi_port[i];
		p->port_number = i + 1;	/* port numbering is 1-based */
		p->hsi_controller = hsi_ctrl;
		p->max_ch = hsi_driver_device_is_hsi(pdev) ?
			    HSI_CHANNELS_MAX : HSI_SSI_CHANNELS_MAX;
		p->max_ch = min(p->max_ch, (u8) HSI_PORT_MAX_CH);
		p->irq = 0;
		p->counters_on = 1;
		p->reg_counters = pdata->ctx.pctx[i].hsr.timeout;
		spin_lock_init(&p->lock);

		err = hsi_port_channels_init(p);
		if (err < 0)
			goto err_channels;
		err = hsi_request_mpu_irq(p);
		if (err < 0)
			goto err_ports;
		err = hsi_request_cawake_irq(p);
		if (err < 0)
			goto err_mpu;
	}
	return 0;

err_mpu:
	hsi_mpu_exit(p);
err_ports:
	hsi_ports_exit(hsi_ctrl, i + 1);
err_channels:
	return err;
}
/*
 * hsi_controller_exit - release all controller-level resources.
 *
 * Tears down the GDD (DMA) engine first, then every port that was
 * initialized (hsi_ctrl->max_p of them).
 */
static void hsi_controller_exit(struct hsi_dev *hsi_ctrl)
{
	hsi_gdd_exit(hsi_ctrl);
	hsi_ports_exit(hsi_ctrl, hsi_ctrl->max_p);
}
/**
 * hsi_controller_init - map the controller MMIO region and set up
 * the software state, ports and GDD (DMA) interrupt.
 * @hsi_ctrl: controller state to initialize
 * @pd: platform device carrying the MMIO resource and platform data
 *
 * Returns 0 on success or a negative errno. The MMIO request/mapping
 * is devm-managed, so only ports are explicitly unwound on failure.
 *
 * Fixes vs. previous revision: use resource_size() instead of the
 * hand-rolled (end - start) + 1, and use the %pR/%p printk specifiers
 * instead of %x, which mismatches resource_size_t and pointer types
 * (notably on 64-bit/LPAE builds).
 */
static int __init hsi_controller_init(struct hsi_dev *hsi_ctrl,
				      struct platform_device *pd)
{
	struct hsi_platform_data *pdata = pd->dev.platform_data;
	struct resource *mem, *ioarea;
	int err;

	mem = platform_get_resource(pd, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pd->dev, "HSI device does not have "
			"HSI IO memory region information\n");
		return -ENXIO;
	}
	dev_dbg(&pd->dev, "hsi_controller_init : IORESOURCE_MEM %pR\n", mem);

	ioarea = devm_request_mem_region(&pd->dev, mem->start,
					 resource_size(mem),
					 dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "Unable to request HSI IO mem region\n");
		return -EBUSY;
	}
	dev_dbg(&pd->dev, "hsi_controller_init : ioarea %pR\n", ioarea);

	hsi_ctrl->phy_base = mem->start;
	hsi_ctrl->base = devm_ioremap(&pd->dev, mem->start,
				      resource_size(mem));
	if (!hsi_ctrl->base) {
		dev_err(&pd->dev, "Unable to ioremap HSI base IO address\n");
		return -ENXIO;
	}
	dev_dbg(&pd->dev, "hsi_controller_init : hsi_ctrl->base=%p\n",
		hsi_ctrl->base);

	hsi_ctrl->id = pd->id;
	if (pdata->num_ports > HSI_MAX_PORTS) {
		dev_err(&pd->dev, "The HSI driver does not support enough "
			"ports!\n");
		return -ENXIO;
	}
	hsi_ctrl->max_p = pdata->num_ports;
	hsi_ctrl->clock_enabled = false;
	hsi_ctrl->clock_rate = 0;
	hsi_ctrl->in_dma_tasklet = false;
	hsi_ctrl->fifo_mapping_strategy = pdata->fifo_mapping_strategy;
	hsi_ctrl->dev = &pd->dev;
	spin_lock_init(&hsi_ctrl->lock);

	err = hsi_init_gdd_chan_count(hsi_ctrl);
	if (err < 0)
		goto rback1;

	err = hsi_ports_init(hsi_ctrl);
	if (err < 0)
		goto rback1;

	err = hsi_request_gdd_irq(hsi_ctrl);
	if (err < 0)
		goto rback2;

	/* Everything is fine */
	return 0;

rback2:
	hsi_ports_exit(hsi_ctrl, hsi_ctrl->max_p);
rback1:
	dev_err(&pd->dev, "Error on hsi_controller initialization\n");
	return err;
}
/**
 * hsi_controller_init - map the controller MMIO region, acquire the
 * HSI clock and set up ports and the GDD (DMA) interrupt.
 * @hsi_ctrl: controller state to initialize
 * @pd: platform device carrying the MMIO resource and platform data
 *
 * Returns 0 on success or a negative errno. The MMIO request/mapping
 * is devm-managed; the clock reference, notifier and ports are unwound
 * explicitly on failure.
 *
 * Fixes vs. previous revision: the return value of
 * hsi_init_gdd_chan_count() was silently ignored (the sibling variant
 * of this function checks it) — it is now checked after the clock is
 * validated; (end - start) + 1 replaced with resource_size().
 */
static int __init hsi_controller_init(struct hsi_dev *hsi_ctrl,
				      struct platform_device *pd)
{
	struct hsi_platform_data *pdata = pd->dev.platform_data;
	struct resource *mem, *ioarea;
	int err;

	mem = platform_get_resource(pd, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pd->dev, "HSI device does not have "
			"HSI IO memory region information\n");
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&pd->dev, mem->start,
					 resource_size(mem),
					 dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "Unable to request HSI IO mem region\n");
		return -EBUSY;
	}
	hsi_ctrl->phy_base = mem->start;
	hsi_ctrl->base = devm_ioremap(&pd->dev, mem->start,
				      resource_size(mem));
	if (!hsi_ctrl->base) {
		dev_err(&pd->dev, "Unable to ioremap HSI base IO address\n");
		return -ENXIO;
	}
	hsi_ctrl->id = pd->id;
	if (pdata->num_ports > HSI_MAX_PORTS) {
		dev_err(&pd->dev, "The HSI driver does not support enough "
			"ports!\n");
		return -ENXIO;
	}
	hsi_ctrl->max_p = pdata->num_ports;
	hsi_ctrl->dev = &pd->dev;
	spin_lock_init(&hsi_ctrl->lock);

	hsi_ctrl->hsi_clk = clk_get(&pd->dev, "hsi_ck");
	if (IS_ERR(hsi_ctrl->hsi_clk)) {
		dev_err(hsi_ctrl->dev, "Unable to get HSI clocks\n");
		return PTR_ERR(hsi_ctrl->hsi_clk);
	}

	/* Was previously called with its return value ignored. */
	err = hsi_init_gdd_chan_count(hsi_ctrl);
	if (err < 0)
		goto rback1;

	if (pdata->clk_notifier_register) {
		hsi_ctrl->hsi_nb.notifier_call = hsi_clk_event;
		hsi_ctrl->hsi_nb.priority = INT_MAX; /* Let's try to be first */
		err = pdata->clk_notifier_register(hsi_ctrl->hsi_clk,
						   &hsi_ctrl->hsi_nb);
		if (err < 0)
			goto rback1;
	}

	err = hsi_ports_init(hsi_ctrl);
	if (err < 0)
		goto rback2;

	err = hsi_request_gdd_irq(hsi_ctrl);
	if (err < 0)
		goto rback3;

	return 0;

rback3:
	hsi_ports_exit(hsi_ctrl, hsi_ctrl->max_p);
rback2:
	if (pdata->clk_notifier_unregister)
		pdata->clk_notifier_unregister(hsi_ctrl->hsi_clk,
					       &hsi_ctrl->hsi_nb);
rback1:
	clk_put(hsi_ctrl->hsi_clk);
	dev_err(&pd->dev, "Error on hsi_controller initialization\n");
	return err;
}