/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  rdma_ah_read_grh(&attr->ah_attr)->sgid_index,
				  &sgid, &sgid_attr);
		rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av,
				 &attr->ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
	}

	if (mask & IB_QP_ALT_PATH) {
		u8 sgid_index =
			rdma_ah_read_grh(&attr->alt_ah_attr)->sgid_index;

		ib_get_cached_gid(&rxe->ib_dev, 1, sgid_index,
				  &sgid, &sgid_attr);

		rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);

		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}
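/*
 * For reference, the IB timeout encoding used above maps the timeout
 * attribute to 4.096 us * 2^timeout. A minimal userspace sketch of the same
 * arithmetic (the kernel converts the nanosecond value with
 * nsecs_to_jiffies(); here we just print the raw durations):
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	for (unsigned t = 1; t <= 8; t++) {
		uint64_t ns = 4096ULL << t;	/* 4.096 us == 4096 ns */
		printf("timeout code %u -> %llu ns (%.3f us)\n",
		       t, (unsigned long long)ns, ns / 1000.0);
	}
	return 0;	/* code 1 -> 8192 ns ... code 8 -> 1048576 ns */
}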
/**
 * dbbt_check - Check if the DBBT is readable and consistent with the mtd BBT
 * @mtd: The mtd Nand device
 * @page: The page where the DBBT is found
 *
 * This function checks if the DBBT is readable and consistent with the mtd
 * layer's idea of bad blocks.
 *
 * return: 0 if the DBBT is readable and consistent with the mtd BBT, a
 * negative error code otherwise.
 */
static int dbbt_check(struct mtd_info *mtd, int page)
{
	int ret, needs_cleanup = 0;
	size_t r;
	void *dbbt_header;
	void *dbbt_entries = NULL;
	struct dbbt_block *dbbt;
	int num_blocks = mtd_div_by_eb(mtd->size, mtd);
	int n;

	dbbt_header = xmalloc(mtd->writesize);

	ret = mtd_read(mtd, page * mtd->writesize, mtd->writesize, &r,
		       dbbt_header);
	if (ret == -EUCLEAN) {
		pr_warn("page %d needs cleaning\n", page);
		needs_cleanup = 1;
	} else if (ret < 0) {
		pr_err("Cannot read page %d: %s\n", page, strerror(-ret));
		goto out;
	}

	dbbt = dbbt_header;

	if (dbbt->FingerPrint != 0x54424244) {
		pr_err("dbbt at page %d is readable but does not contain a valid DBBT\n",
		       page);
		ret = -EINVAL;
		goto out;
	}

	if (dbbt->DBBTNumOfPages) {
		dbbt_entries = xmalloc(mtd->writesize);

		ret = mtd_read(mtd, (page + 4) * mtd->writesize,
			       mtd->writesize, &r, dbbt_entries);
		if (ret == -EUCLEAN) {
			pr_warn("page %d needs cleaning\n", page);
			needs_cleanup = 1;
		} else if (ret < 0) {
			pr_err("Cannot read page %d: %s\n", page,
			       strerror(-ret));
			goto out;
		}
	} else {
		dbbt_entries = NULL;
	}

	for (n = 0; n < num_blocks; n++) {
		if (mtd_peb_is_bad(mtd, n) != dbbt_block_is_bad(dbbt_entries, n)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = 0;
out:
	free(dbbt_header);
	free(dbbt_entries);

	if (ret < 0)
		return ret;
	if (needs_cleanup)
		return -EUCLEAN;
	return 0;
}
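/*
 * The magic 0x54424244 compared against FingerPrint above is just the ASCII
 * string "DBBT" read as a little-endian 32-bit word. A standalone sketch
 * (not part of the driver) to verify that mapping:
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint32_t fp = 0x54424244;	/* bytes 0x44 0x42 0x42 0x54, LSB first */
	char s[5] = { 0 };

	memcpy(s, &fp, 4);		/* assumes a little-endian host */
	printf("%s\n", s);		/* prints "DBBT" */
	return 0;
}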
static void read_firmware_all(struct mtd_info *mtd, struct fcb_block *fcb,
			      void **data, int *len, int *used_refresh,
			      int *unused_refresh, int *used)
{
	void *primary = NULL, *secondary = NULL;
	int pages_per_block = mtd->erasesize / mtd->writesize;
	int fw0 = imx_bbu_firmware_start_block(mtd, 0) * pages_per_block;
	int fw1 = imx_bbu_firmware_start_block(mtd, 1) * pages_per_block;
	int first, ret, primary_refresh = 0, secondary_refresh = 0;

	*used_refresh = 0;
	*unused_refresh = 0;

	if (fcb->Firmware1_startingPage == fw0 &&
	    fcb->Firmware2_startingPage == fw1) {
		first = 0;
	} else if (fcb->Firmware1_startingPage == fw1 &&
		   fcb->Firmware2_startingPage == fw0) {
		first = 1;
	} else {
		pr_warn("FCB is not what we expect. Update will not be robust\n");
		*used = 0;
		return;
	}

	if (fcb->PagesInFirmware1 != fcb->PagesInFirmware2) {
		pr_warn("FCB is not what we expect. Update will not be robust\n");
		return;
	}

	*len = fcb->PagesInFirmware1 * mtd->writesize;

	ret = read_firmware(mtd, fcb->Firmware1_startingPage,
			    fcb->PagesInFirmware1, &primary);
	if (ret > 0)
		primary_refresh = 1;

	ret = read_firmware(mtd, fcb->Firmware2_startingPage,
			    fcb->PagesInFirmware2, &secondary);
	if (ret > 0)
		secondary_refresh = 1;

	if (!primary && !secondary) {
		*unused_refresh = 1;
		*used_refresh = 1;
		*used = 0;
		*data = NULL;
	} else if (primary && !secondary) {
		*used_refresh = primary_refresh;
		*unused_refresh = 1;
		*used = first;
		*data = primary;
		return;
	} else if (secondary && !primary) {
		*used_refresh = secondary_refresh;
		*unused_refresh = 1;
		*used = !first;
		*data = secondary;
	} else {
		if (memcmp(primary, secondary,
			   fcb->PagesInFirmware1 * mtd->writesize))
			*unused_refresh = 1;

		*used_refresh = primary_refresh;
		*used = first;
		*data = primary;
		free(secondary);
	}

	pr_info("Primary firmware is on pages %d-%d, %svalid, %s\n",
		fcb->Firmware1_startingPage,
		fcb->Firmware1_startingPage + fcb->PagesInFirmware1,
		primary ? "" : "in",
		primary_refresh ? "needs cleanup" : "clean");

	pr_info("secondary firmware is on pages %d-%d, %svalid, %s\n",
		fcb->Firmware2_startingPage,
		fcb->Firmware2_startingPage + fcb->PagesInFirmware2,
		secondary ? "" : "in",
		secondary_refresh ? "needs cleanup" : "clean");

	pr_info("ROM uses slot %d\n", *used);
}
static int __init cps_pm_init(void)
{
	unsigned cpu;
	int err;

	/* Detect appropriate sync types for the system */
	switch (current_cpu_data.cputype) {
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_M5150:
	case CPU_P5600:
		stype_intervention = 0x2;
		stype_memory = 0x3;
		stype_ordering = 0x10;
		break;

	default:
		pr_warn("Power management is using heavyweight sync 0\n");
	}

	/* A CM is required for all non-coherent states */
	if (!mips_cm_present()) {
		pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
		goto out;
	}

	/*
	 * If interrupts were enabled whilst running a wait instruction on a
	 * non-coherent core then the VPE may end up processing interrupts
	 * whilst non-coherent. That would be bad.
	 */
	if (cpu_wait == r4k_wait_irqoff)
		set_bit(CPS_PM_NC_WAIT, state_support);
	else
		pr_warn("pm-cps: non-coherent wait unavailable\n");

	/* Detect whether a CPC is present */
	if (mips_cpc_present()) {
		/* Detect whether clock gating is implemented */
		if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK)
			set_bit(CPS_PM_CLOCK_GATED, state_support);
		else
			pr_warn("pm-cps: CPC does not support clock gating\n");

		/* Power gating is available with CPS SMP & any CPC */
		if (mips_cps_smp_in_use())
			set_bit(CPS_PM_POWER_GATED, state_support);
		else
			pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
	} else {
		pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
	}

	for_each_present_cpu(cpu) {
		err = cps_gen_core_entries(cpu);
		if (err)
			return err;
	}
out:
	return 0;
}
static int ak8963c_selftest(struct ak8963c_p *data, int *sf)
{
	u8 temp[6], reg;
	s16 x, y, z;
	int retry_count = 0;
	int ready_count = 0;
	int ret;

retry:
	mutex_lock(&data->lock);

	/* power down */
	reg = AK8963C_CNTL1_POWER_DOWN;
	ak8963c_smbus_write_byte(data->client, AK8963C_REG_CNTL1, &reg);

	/* read device info */
	ak8963c_smbus_read_byte_block(data->client, AK8963C_REG_WIA, temp, 2);
	pr_info("[SENSOR]: %s - device id = 0x%x, info = 0x%x\n",
		__func__, temp[0], temp[1]);

	/* set ATSC self test bit to 1 */
	reg = 0x40;
	ak8963c_smbus_write_byte(data->client, AK8963C_REG_ASTC, &reg);

	/* start self test */
	reg = AK8963C_CNTL1_SELF_TEST;
	ak8963c_smbus_write_byte(data->client, AK8963C_REG_CNTL1, &reg);

	/* wait for data ready */
	while (ready_count < 10) {
		msleep(20);
		ret = ak8963c_smbus_read_byte(data->client,
					      AK8963C_REG_ST1, &reg);
		if ((reg == 1) && (ret == 0))
			break;
		ready_count++;
	}

	ak8963c_smbus_read_byte_block(data->client, AK8963C_REG_HXL,
				      temp, sizeof(temp));

	/* set ATSC self test bit to 0 */
	reg = 0x00;
	ak8963c_smbus_write_byte(data->client, AK8963C_REG_ASTC, &reg);

	mutex_unlock(&data->lock);

	x = temp[0] | (temp[1] << 8);
	y = temp[2] | (temp[3] << 8);
	z = temp[4] | (temp[5] << 8);

	/* Hadj = (H*(Asa+128))/256 */
	x = (x * (data->asa[0] + 128)) >> 8;
	y = (y * (data->asa[1] + 128)) >> 8;
	z = (z * (data->asa[2] + 128)) >> 8;

	pr_info("[SENSOR]: %s - self test x = %d, y = %d, z = %d\n",
		__func__, x, y, z);

	if ((x >= -200) && (x <= 200))
		pr_info("[SENSOR]: %s - x passed self test, -200<=x<=200\n",
			__func__);
	else
		pr_info("[SENSOR]: %s - x failed self test, -200<=x<=200\n",
			__func__);
	if ((y >= -200) && (y <= 200))
		pr_info("[SENSOR]: %s - y passed self test, -200<=y<=200\n",
			__func__);
	else
		pr_info("[SENSOR]: %s - y failed self test, -200<=y<=200\n",
			__func__);
	if ((z >= -3200) && (z <= -800))
		pr_info("[SENSOR]: %s - z passed self test, -3200<=z<=-800\n",
			__func__);
	else
		pr_info("[SENSOR]: %s - z failed self test, -3200<=z<=-800\n",
			__func__);

	sf[0] = x;
	sf[1] = y;
	sf[2] = z;

	if (((x >= -200) && (x <= 200)) &&
	    ((y >= -200) && (y <= 200)) &&
	    ((z >= -3200) && (z <= -800))) {
		pr_info("%s, Selftest is successful.\n", __func__);
		return 1;
	} else {
		if (retry_count < 5) {
			retry_count++;
			pr_warn("############################################\n");
			pr_warn("%s, retry_count=%d\n", __func__, retry_count);
			pr_warn("############################################\n");
			goto retry;
		} else {
			pr_err("[SENSOR]: %s - Selftest is failed.\n",
			       __func__);
			return 0;
		}
	}
}
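/*
 * The sensitivity adjustment above, Hadj = H * (ASA + 128) / 256, scales each
 * raw axis by its per-axis fuse value ASA. A minimal userspace sketch of the
 * same arithmetic (the sample values are made up for illustration):
 */
#include <stdio.h>
#include <stdint.h>

static int16_t ak_adjust(int16_t raw, uint8_t asa)
{
	/* Hadj = (H * (ASA + 128)) / 256, per the datasheet formula above */
	return (int16_t)(((int32_t)raw * (asa + 128)) >> 8);
}

int main(void)
{
	/* ASA = 128 gives a factor of 1.0; ASA = 0 halves the reading */
	printf("%d\n", ak_adjust(100, 128));	/* -> 100 * 256 / 256 = 100 */
	printf("%d\n", ak_adjust(100, 0));	/* -> 100 * 128 / 256 = 50 */
	return 0;
}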
static void max14577_muic_detect_dev(struct max14577_muic_data *muic_data,
				     int irq)
{
	struct i2c_client *i2c = muic_data->i2c;
	const struct max14577_muic_vps_data *tmp_vps;
	enum muic_attached_dev new_dev = ATTACHED_DEV_UNKNOWN_MUIC;
	int intr = MUIC_INTR_DETACH;
	u8 status[2], ctrl2, ctrl3;
	u8 adcerr, adclow, adc, vbvolt, chgdetrun, chgtyp, adcen;
	int ret;
	int i;

	ret = max14577_bulk_read(i2c, MAX14577_MUIC_REG_STATUS1, 2, status);
	if (ret) {
		pr_err("%s:%s fail to read STAT (%d)\n", MUIC_DEV_NAME,
		       __func__, ret);
		return;
	}

	ret = max14577_read_reg(muic_data->i2c, MAX14577_REG_CONTROL2, &ctrl2);
	if (ret) {
		pr_err("%s:%s fail to read CTRL2 (%d)\n", MUIC_DEV_NAME,
		       __func__, ret);
		return;
	}

	ret = max14577_read_reg(muic_data->i2c, MAX14577_MUIC_REG_CONTROL3,
				&ctrl3);
	if (ret) {
		pr_err("%s:%s fail to read CTRL3 (%d)\n", MUIC_DEV_NAME,
		       __func__, ret);
		return;
	}

	pr_info("%s:%s STATUS1:0x%02x, 2:0x%02x, ctrl2:0x%02x\n",
		MUIC_DEV_NAME, __func__, status[0], status[1], ctrl2);

	adcerr = status[0] & STATUS1_ADCERR_MASK;
	adclow = status[0] & STATUS1_ADCLOW_MASK;
	adc = status[0] & STATUS1_ADC_MASK;
	vbvolt = status[1] & STATUS2_VBVOLT_MASK;
	chgdetrun = status[1] & STATUS2_CHGDETRUN_MASK;
	chgtyp = status[1] & STATUS2_CHGTYP_MASK;
	adcen = ctrl2 & CTRL2_ADCEN_MASK;

	pr_info("%s:%s adcerr:0x%x adclow:0x%x adc:0x%x vb:0x%x chgdetrun:0x%x"
		" chgtyp:0x%x adcen:0x%x\n", MUIC_DEV_NAME, __func__,
		adcerr, adclow, adc, vbvolt, chgdetrun, chgtyp, adcen);

	/* Workaround for Factory mode.
	 * Abandon adc interrupt of approximately +-100K range
	 * if previous cable status was JIG UART BOOT OFF.
	 */
	if (muic_data->attached_dev == ATTACHED_DEV_JIG_UART_OFF_MUIC) {
		if ((adc == (ADC_JIG_UART_OFF + 1)) ||
		    (adc == (ADC_JIG_UART_OFF - 1))) {
			pr_warn("%s:%s abandon ADC\n", MUIC_DEV_NAME, __func__);
			return;
		}
	}

	for (i = 0; i < ARRAY_SIZE(muic_vps_table); i++) {
		tmp_vps = &(muic_vps_table[i]);

		if (tmp_vps->adcerr != adcerr)
			continue;
		if (tmp_vps->adclow != adclow)
			continue;
		if (tmp_vps->adc != adc)
			continue;
		if (tmp_vps->vbvolt != vbvolt)
			continue;
		if (tmp_vps->chgdetrun != chgdetrun)
			continue;
		if (tmp_vps->chgtyp != chgtyp)
			continue;

		pr_info("%s:%s vps table match found at i(%d), %s\n",
			MUIC_DEV_NAME, __func__, i, tmp_vps->vps_name);

		new_dev = tmp_vps->attached_dev;
		intr = MUIC_INTR_ATTACH;
		break;
	}

	if (intr == MUIC_INTR_ATTACH) {
		pr_info("%s:%s ATTACHED\n", MUIC_DEV_NAME, __func__);
		max14577_muic_handle_attach(muic_data, new_dev);
	} else {
		pr_info("%s:%s DETACHED\n", MUIC_DEV_NAME, __func__);
		max14577_muic_handle_detach(muic_data);
	}
}
static int rfkill_gpio_probe(struct platform_device *pdev)
{
	struct rfkill_gpio_data *rfkill;
	struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
	int ret = 0;
	int len = 0;

	if (!pdata) {
		pr_warn("%s: No platform data specified\n", __func__);
		return -EINVAL;
	}

	/* make sure at least one GPIO is defined and that a name is given */
	if (!pdata->name || (!gpio_is_valid(pdata->reset_gpio) &&
			     !gpio_is_valid(pdata->shutdown_gpio))) {
		pr_warn("%s: invalid platform data\n", __func__);
		return -EINVAL;
	}

	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
	if (!rfkill)
		return -ENOMEM;

	if (pdata->gpio_runtime_setup) {
		ret = pdata->gpio_runtime_setup(pdev);
		if (ret) {
			pr_warn("%s: can't set up gpio\n", __func__);
			goto fail_alloc;
		}
	}

	rfkill->pdata = pdata;

	len = strlen(pdata->name);
	rfkill->reset_name = kzalloc(len + 7, GFP_KERNEL);
	if (!rfkill->reset_name) {
		ret = -ENOMEM;
		goto fail_alloc;
	}

	rfkill->shutdown_name = kzalloc(len + 10, GFP_KERNEL);
	if (!rfkill->shutdown_name) {
		ret = -ENOMEM;
		goto fail_reset_name;
	}

	snprintf(rfkill->reset_name, len + 7, "%s_reset", pdata->name);
	snprintf(rfkill->shutdown_name, len + 10, "%s_shutdown", pdata->name);

	if (pdata->power_clk_name) {
		rfkill->pwr_clk = clk_get(&pdev->dev, pdata->power_clk_name);
		if (IS_ERR(rfkill->pwr_clk)) {
			pr_warn("%s: can't find pwr_clk.\n", __func__);
			ret = PTR_ERR(rfkill->pwr_clk);
			goto fail_shutdown_name;
		}
	}

	if (gpio_is_valid(pdata->reset_gpio)) {
		ret = gpio_request(pdata->reset_gpio, rfkill->reset_name);
		if (ret) {
			pr_warn("%s: failed to get reset gpio.\n", __func__);
			goto fail_clock;
		}
	}

	if (gpio_is_valid(pdata->shutdown_gpio)) {
		ret = gpio_request(pdata->shutdown_gpio, rfkill->shutdown_name);
		if (ret) {
			pr_warn("%s: failed to get shutdown gpio.\n", __func__);
			goto fail_reset;
		}
	}

	rfkill->rfkill_dev = rfkill_alloc(pdata->name, &pdev->dev, pdata->type,
					  &rfkill_gpio_ops, rfkill);
	if (!rfkill->rfkill_dev) {
		ret = -ENOMEM;
		goto fail_shutdown;
	}

	ret = rfkill_register(rfkill->rfkill_dev);
	if (ret < 0)
		goto fail_rfkill;

	platform_set_drvdata(pdev, rfkill);

	dev_info(&pdev->dev, "%s device registered.\n", pdata->name);

	return 0;

fail_rfkill:
	rfkill_destroy(rfkill->rfkill_dev);
fail_shutdown:
	if (gpio_is_valid(pdata->shutdown_gpio))
		gpio_free(pdata->shutdown_gpio);
fail_reset:
	if (gpio_is_valid(pdata->reset_gpio))
		gpio_free(pdata->reset_gpio);
fail_clock:
	if (rfkill->pwr_clk)
		clk_put(rfkill->pwr_clk);
fail_shutdown_name:
	kfree(rfkill->shutdown_name);
fail_reset_name:
	kfree(rfkill->reset_name);
fail_alloc:
	kfree(rfkill);

	return ret;
}
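/*
 * Sizing note for the name buffers above: "%s_reset" produces strlen(name) + 6
 * characters, so the buffer (and the snprintf bound) must be strlen(name) + 7
 * to leave room for the NUL terminator; "_shutdown" likewise needs len + 10.
 * A quick standalone check of that arithmetic:
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *name = "bt";	/* made-up device name */
	size_t len = strlen(name);
	char *buf = malloc(len + 7);

	if (!buf)
		return 1;
	snprintf(buf, len + 7, "%s_reset", name);	/* fits exactly */
	printf("%s -> %zu chars + NUL in %zu bytes\n",
	       buf, strlen(buf), len + 7);
	free(buf);
	return 0;
}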
/* Called with 'buf_size == 0' if buf is in fact a pointer _directly_ into
   the flash, XIP-style */
static int jffs2_scan_eraseblock(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb,
				 unsigned char *buf, uint32_t buf_size,
				 struct jffs2_summary *s)
{
	struct jffs2_unknown_node *node;
	struct jffs2_unknown_node crcnode;
	uint32_t ofs, prevofs, max_ofs;
	uint32_t hdr_crc, buf_ofs, buf_len;
	int err;
	int noise = 0;
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	int cleanmarkerfound = 0;
#endif

	ofs = jeb->offset;
	prevofs = jeb->offset - 1;

	jffs2_dbg(1, "%s(): Scanning block at 0x%x\n", __func__, ofs);

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (jffs2_cleanmarker_oob(c)) {
		int ret;

		if (mtd_block_isbad(c->mtd, jeb->offset))
			return BLK_STATE_BADBLOCK;

		ret = jffs2_check_nand_cleanmarker(c, jeb);
		jffs2_dbg(2, "jffs2_check_nand_cleanmarker returned %d\n", ret);

		/* Even if it's not found, we still scan to see
		   if the block is empty. We use this information
		   to decide whether to erase it or not. */
		switch (ret) {
		case 0:		cleanmarkerfound = 1; break;
		case 1:		break;
		default:	return ret;
		}
	}
#endif

	if (jffs2_sum_active()) {
		struct jffs2_sum_marker *sm;
		void *sumptr = NULL;
		uint32_t sumlen;

		if (!buf_size) {
			/* XIP case. Just look, point at the summary if it's there */
			sm = (void *)buf + c->sector_size - sizeof(*sm);
			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
				sumptr = buf + je32_to_cpu(sm->offset);
				sumlen = c->sector_size - je32_to_cpu(sm->offset);
			}
		} else {
			/* If NAND flash, read a whole page of it. Else just the end */
			if (c->wbuf_pagesize)
				buf_len = c->wbuf_pagesize;
			else
				buf_len = sizeof(*sm);

			/* Read as much as we want into the _end_ of the preallocated buffer */
			err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
						  jeb->offset + c->sector_size - buf_len,
						  buf_len);
			if (err)
				return err;

			sm = (void *)buf + buf_size - sizeof(*sm);
			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
				sumlen = c->sector_size - je32_to_cpu(sm->offset);
				sumptr = buf + buf_size - sumlen;

				/* sm->offset maybe wrong but MAGIC maybe right */
				if (sumlen > c->sector_size)
					goto full_scan;

				/* Now, make sure the summary itself is available */
				if (sumlen > buf_size) {
					/* Need to kmalloc for this. */
					sumptr = kmalloc(sumlen, GFP_KERNEL);
					if (!sumptr)
						return -ENOMEM;
					memcpy(sumptr + sumlen - buf_len,
					       buf + buf_size - buf_len, buf_len);
				}
				if (buf_len < sumlen) {
					/* Need to read more so that the entire summary node is present */
					err = jffs2_fill_scan_buf(c, sumptr,
								  jeb->offset + c->sector_size - sumlen,
								  sumlen - buf_len);
					if (err)
						return err;
				}
			}
		}

		if (sumptr) {
			err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen,
						     &pseudo_random);

			if (buf_size && sumlen > buf_size)
				kfree(sumptr);
			/* If it returns with a real error, bail.
			   If it returns positive, that's a block classification
			   (i.e. BLK_STATE_xxx) so return that too.
			   If it returns zero, fall through to full scan. */
			if (err)
				return err;
		}
	}

full_scan:
	buf_ofs = jeb->offset;

	if (!buf_size) {
		/* This is the XIP case -- we're reading _directly_ from the flash chip */
		buf_len = c->sector_size;
	} else {
		buf_len = EMPTY_SCAN_SIZE(c->sector_size);
		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
		if (err)
			return err;
	}

	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
	ofs = 0;
	max_ofs = EMPTY_SCAN_SIZE(c->sector_size);
	/* Scan only EMPTY_SCAN_SIZE of 0xFF before declaring it's empty */
	while (ofs < max_ofs && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
		ofs += 4;

	if (ofs == max_ofs) {
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
		if (jffs2_cleanmarker_oob(c)) {
			/* scan oob, take care of cleanmarker */
			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);

			jffs2_dbg(2, "jffs2_check_oob_empty returned %d\n",
				  ret);
			switch (ret) {
			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
			case 1:		return BLK_STATE_ALLDIRTY;
			default:	return ret;
			}
		}
#endif
		jffs2_dbg(1, "Block at 0x%08x is empty (erased)\n",
			  jeb->offset);
		if (c->cleanmarker_size == 0)
			return BLK_STATE_CLEANMARKER;	/* don't bother with re-erase */
		else
			return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
	}
	if (ofs) {
		jffs2_dbg(1, "Free space at %08x ends at %08x\n", jeb->offset,
			  jeb->offset + ofs);
		if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
			return err;
		if ((err = jffs2_scan_dirty_space(c, jeb, ofs)))
			return err;
	}

	/* Now ofs is a complete physical flash offset as it always was... */
	ofs += jeb->offset;

	noise = 10;

	dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",
		    jeb->offset);

scan_more:
	while (ofs < jeb->offset + c->sector_size) {

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Make sure there are node refs available for use */
		err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
		if (err)
			return err;

		cond_resched();

		if (ofs & 3) {
			pr_warn("Eep. ofs 0x%08x not word-aligned!\n", ofs);
			ofs = PAD(ofs);
			continue;
		}
		if (ofs == prevofs) {
			pr_warn("ofs 0x%08x has already been seen. Skipping\n",
				ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		prevofs = ofs;

		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
			jffs2_dbg(1, "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n",
				  sizeof(struct jffs2_unknown_node),
				  jeb->offset, c->sector_size, ofs,
				  sizeof(*node));
			if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size) - ofs)))
				return err;
			break;
		}

		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			jffs2_dbg(1, "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
				  sizeof(struct jffs2_unknown_node),
				  buf_len, ofs);
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
		}

		node = (struct jffs2_unknown_node *)&buf[ofs - buf_ofs];

		if (*(uint32_t *)(&buf[ofs - buf_ofs]) == 0xffffffff) {
			uint32_t inbuf_ofs;
			uint32_t empty_start, scan_end;

			empty_start = ofs;
			ofs += 4;
			scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size) / 8, buf_len);

			jffs2_dbg(1, "Found empty flash at 0x%08x\n", ofs);
		more_empty:
			inbuf_ofs = ofs - buf_ofs;
			while (inbuf_ofs < scan_end) {
				if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) {
					pr_warn("Empty flash at 0x%08x ends at 0x%08x\n",
						empty_start, ofs);
					if ((err = jffs2_scan_dirty_space(c, jeb, ofs - empty_start)))
						return err;
					goto scan_more;
				}

				inbuf_ofs += 4;
				ofs += 4;
			}
			/* Ran off end. */
			jffs2_dbg(1, "Empty flash to end of buffer at 0x%08x\n",
				  ofs);

			/* If we're only checking the beginning of a block with a cleanmarker,
			   bail now */
			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
			    c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
				jffs2_dbg(1, "%d bytes at start of block seems clean... assuming all clean\n",
					  EMPTY_SCAN_SIZE(c->sector_size));
				return BLK_STATE_CLEANMARKER;
			}
			if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */
				scan_end = buf_len;
				goto more_empty;
			}

			/* See how much more there is to read in this eraseblock... */
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			if (!buf_len) {
				/* No more to read. Break out of main loop without marking
				   this range of empty space as dirty (because it's not) */
				jffs2_dbg(1, "Empty flash at %08x runs to end of block. Treating as free_space\n",
					  empty_start);
				break;
			}
			/* point never reaches here */
			scan_end = buf_len;
			jffs2_dbg(1, "Reading another 0x%x at 0x%08x\n",
				  buf_len, ofs);
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
			goto more_empty;
		}

		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
			pr_warn("Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n",
				ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
			jffs2_dbg(1, "Dirty bitmask at 0x%08x\n", ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
			pr_warn("Old JFFS2 bitmask found at 0x%08x\n", ofs);
			pr_warn("You cannot use older JFFS2 filesystems with newer kernels\n");
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
			/* OK. We're out of possibilities. Whinge and move on */
			noisy_printk(&noise,
				     "%s(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
				     __func__, JFFS2_MAGIC_BITMASK, ofs,
				     je16_to_cpu(node->magic));
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}
		/* We seem to have a node of sorts. Check the CRC */
		crcnode.magic = node->magic;
		crcnode.nodetype = cpu_to_je16(je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
		crcnode.totlen = node->totlen;
		hdr_crc = crc32(0, &crcnode, sizeof(crcnode) - 4);

		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
			noisy_printk(&noise,
				     "%s(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
				     __func__, ofs, je16_to_cpu(node->magic),
				     je16_to_cpu(node->nodetype),
				     je32_to_cpu(node->totlen),
				     je32_to_cpu(node->hdr_crc), hdr_crc);
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}

		if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) {
			/* Eep. Node goes over the end of the erase block. */
			pr_warn("Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
				ofs, je32_to_cpu(node->totlen));
			pr_warn("Perhaps the file system was created with the wrong erase size?\n");
			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
				return err;
			ofs += 4;
			continue;
		}

		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
			/* Wheee. This is an obsoleted node */
			jffs2_dbg(2, "Node at 0x%08x is obsolete. Skipping\n",
				  ofs);
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			continue;
		}

		switch (je16_to_cpu(node->nodetype)) {
		case JFFS2_NODETYPE_INODE:
			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				jffs2_dbg(1, "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  sizeof(struct jffs2_raw_inode),
					  buf_len, ofs);
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s);
			if (err)
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_DIRENT:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				jffs2_dbg(1, "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len,
					  ofs);
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s);
			if (err)
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

#ifdef CONFIG_JFFS2_FS_XATTR
		case JFFS2_NODETYPE_XATTR:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				jffs2_dbg(1, "Fewer than %d bytes (xattr node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len,
					  ofs);
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s);
			if (err)
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;
		case JFFS2_NODETYPE_XREF:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				jffs2_dbg(1, "Fewer than %d bytes (xref node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len,
					  ofs);
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s);
			if (err)
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;
#endif	/* CONFIG_JFFS2_FS_XATTR */

		case JFFS2_NODETYPE_CLEANMARKER:
			jffs2_dbg(1, "CLEANMARKER node found at 0x%08x\n", ofs);
			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
				pr_notice("CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
					  ofs, je32_to_cpu(node->totlen),
					  c->cleanmarker_size);
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
					return err;
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else if (jeb->first_node) {
				pr_notice("CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n",
					  ofs, jeb->offset);
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
					return err;
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else {
				jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL);
				ofs += PAD(c->cleanmarker_size);
			}
			break;

		case JFFS2_NODETYPE_PADDING:
			if (jffs2_sum_active())
				jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
				return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		default:
			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
			case JFFS2_FEATURE_ROCOMPAT:
				pr_notice("Read-only compatible feature node (0x%04x) found at offset 0x%08x\n",
					  je16_to_cpu(node->nodetype), ofs);
				c->flags |= JFFS2_SB_FLAG_RO;
				if (!(jffs2_is_readonly(c)))
					return -EROFS;
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
					return err;
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_INCOMPAT:
				pr_notice("Incompatible feature node (0x%04x) found at offset 0x%08x\n",
					  je16_to_cpu(node->nodetype), ofs);
				return -EINVAL;

			case JFFS2_FEATURE_RWCOMPAT_DELETE:
				jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
					  je16_to_cpu(node->nodetype), ofs);
				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
					return err;
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_RWCOMPAT_COPY: {
				jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
					  je16_to_cpu(node->nodetype), ofs);

				jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);

				/* We can't summarise nodes we don't grok */
				jffs2_sum_disable_collecting(s);
				ofs += PAD(je32_to_cpu(node->totlen));
				break;
				}
			}
		}
	}

	if (jffs2_sum_active()) {
		if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) {
			dbg_summary("There is not enough space for "
				    "summary information, disabling for this jeb!\n");
			jffs2_sum_disable_collecting(s);
		}
	}

	jffs2_dbg(1, "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
		  jeb->offset, jeb->free_size, jeb->dirty_size,
		  jeb->unchecked_size, jeb->used_size, jeb->wasted_size);

	/* mark_node_obsolete can add to wasted !! */
	if (jeb->wasted_size) {
		jeb->dirty_size += jeb->wasted_size;
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->wasted_size = 0;
	}

	return jffs2_scan_classify_jeb(c, jeb);
}
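/*
 * The header CRC check above covers only the first three fields of the common
 * node header (magic, nodetype with ACCURATE forced on, totlen), i.e.
 * sizeof(header) - 4 bytes, because the last word is hdr_crc itself. A
 * standalone sketch of the same coverage rule, using a stand-in struct and
 * zlib's crc32() (an assumption for illustration: zlib's crc32 differs from
 * the kernel helper in its seed/inversion convention, so the value itself
 * would differ; the point here is only which bytes are covered):
 */
#include <stdint.h>
#include <zlib.h>		/* link with -lz */

struct hdr {
	uint16_t magic;
	uint16_t nodetype;
	uint32_t totlen;
	uint32_t hdr_crc;
} __attribute__((packed));

static int hdr_crc_ok(const struct hdr *h)
{
	/* CRC over everything except the trailing hdr_crc word */
	uint32_t c = crc32(0, (const unsigned char *)h, sizeof(*h) - 4);

	return c == h->hdr_crc;
}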
int jffs2_scan_medium(struct jffs2_sb_info *c)
{
	int i, ret;
	uint32_t empty_blocks = 0, bad_blocks = 0;
	unsigned char *flashbuf = NULL;
	uint32_t buf_size = 0;
	struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
#ifndef __ECOS
	size_t pointlen, try_size;

	ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen,
			(void **)&flashbuf, NULL);
	if (!ret && pointlen < c->mtd->size) {
		/* Don't muck about if it won't let us point to the whole flash */
		jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
			  pointlen);
		mtd_unpoint(c->mtd, 0, pointlen);
		flashbuf = NULL;
	}
	if (ret && ret != -EOPNOTSUPP)
		jffs2_dbg(1, "MTD point failed %d\n", ret);
#endif
	if (!flashbuf) {
		/* For NAND it's quicker to read a whole eraseblock at a time,
		   apparently */
		if (jffs2_cleanmarker_oob(c))
			try_size = c->sector_size;
		else
			try_size = PAGE_SIZE;

		jffs2_dbg(1, "Trying to allocate readbuf of %zu "
			  "bytes\n", try_size);

		flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size);
		if (!flashbuf)
			return -ENOMEM;

		jffs2_dbg(1, "Allocated readbuf of %zu bytes\n", try_size);

		buf_size = (uint32_t)try_size;
	}

	if (jffs2_sum_active()) {
		s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
		if (!s) {
			JFFS2_WARNING("Can't allocate memory for summary\n");
			ret = -ENOMEM;
			goto out;
		}
	}

	for (i = 0; i < c->nr_blocks; i++) {
		struct jffs2_eraseblock *jeb = &c->blocks[i];

		cond_resched();

		/* reset summary info for next eraseblock scan */
		jffs2_sum_reset_collected(s);

		ret = jffs2_scan_eraseblock(c, jeb,
					    buf_size ? flashbuf : (flashbuf + jeb->offset),
					    buf_size, s);

		if (ret < 0)
			goto out;

		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

		/* Now decide which list to put it on */
		switch (ret) {
		case BLK_STATE_ALLFF:
			/*
			 * Empty block. Since we can't be sure it
			 * was entirely erased, we just queue it for erase
			 * again. It will be marked as such when the erase
			 * is complete. Meanwhile we still count it as empty
			 * for later checks.
			 */
			empty_blocks++;
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_CLEANMARKER:
			/* Only a CLEANMARKER node is valid */
			if (!jeb->dirty_size) {
				/* It's actually free */
				list_add(&jeb->list, &c->free_list);
				c->nr_free_blocks++;
			} else {
				/* Dirt */
				jffs2_dbg(1, "Adding all-dirty block at 0x%08x to erase_pending_list\n",
					  jeb->offset);
				list_add(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
			}
			break;

		case BLK_STATE_CLEAN:
			/* Full (or almost full) of clean data. Clean list */
			list_add(&jeb->list, &c->clean_list);
			break;

		case BLK_STATE_PARTDIRTY:
			/* Some data, but not full. Dirty list. */
			/* We want to remember the block with most free space
			   and stick it in the 'nextblock' position to start writing to it. */
			if (jeb->free_size > min_free(c) &&
			    (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
				/* Better candidate for the next writes to go to */
				if (c->nextblock) {
					ret = file_dirty(c, c->nextblock);
					if (ret)
						goto out;
					/* deleting summary information of the old nextblock */
					jffs2_sum_reset_collected(c->summary);
				}
				/* update collected summary information for the current nextblock */
				jffs2_sum_move_collected(c, s);
				jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
					  __func__, jeb->offset);
				c->nextblock = jeb;
			} else {
				ret = file_dirty(c, jeb);
				if (ret)
					goto out;
			}
			break;

		case BLK_STATE_ALLDIRTY:
			/* Nothing valid - not even a clean marker. Needs erasing. */
			/* For now we just put it on the erasing list. We'll start the erases later */
			jffs2_dbg(1, "Erase block at 0x%08x is not formatted. It will be erased\n",
				  jeb->offset);
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_BADBLOCK:
			jffs2_dbg(1, "Block at 0x%08x is bad\n", jeb->offset);
			list_add(&jeb->list, &c->bad_list);
			c->bad_size += c->sector_size;
			c->free_size -= c->sector_size;
			bad_blocks++;
			break;

		default:
			pr_warn("%s(): unknown block state\n", __func__);
			BUG();
		}
	}

	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
	if (c->nextblock && (c->nextblock->dirty_size)) {
		c->nextblock->wasted_size += c->nextblock->dirty_size;
		c->wasted_size += c->nextblock->dirty_size;
		c->dirty_size -= c->nextblock->dirty_size;
		c->nextblock->dirty_size = 0;
	}
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize &&
	    c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) {
		/* If we're going to start writing into a block which already
		   contains data, and the end of the data isn't page-aligned,
		   skip a little and align it. */

		uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;

		jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n",
			  __func__, skip);
		jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
		jffs2_scan_dirty_space(c, c->nextblock, skip);
	}
#endif
	if (c->nr_erasing_blocks) {
		if (!c->used_size &&
		    ((c->nr_free_blocks + empty_blocks + bad_blocks) != c->nr_blocks ||
		     bad_blocks == c->nr_blocks)) {
			pr_notice("Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
			pr_notice("empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",
				  empty_blocks, bad_blocks, c->nr_blocks);
			ret = -EIO;
			goto out;
		}
		spin_lock(&c->erase_completion_lock);
		jffs2_garbage_collect_trigger(c);
		spin_unlock(&c->erase_completion_lock);
	}
	ret = 0;
out:
	if (buf_size)
		kfree(flashbuf);
#ifndef __ECOS
	else
		mtd_unpoint(c->mtd, 0, c->mtd->size);
#endif
	kfree(s);
	return ret;
}
/* This condition should not occur with a commercially deployed processor.
   Display reminder for early engineering test or demo chips / FPGA
   bitstreams */
static int __init check_s32c1i(void)
{
	pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
	return 0;
}
void __init sunxi_timer_init(void)
{
#ifdef CONFIG_OF
	struct device_node *node;
	struct clk *clk;
#endif
	unsigned long rate = 0;
	int ret, irq;
	u32 val;

#ifdef CONFIG_OF
	node = of_find_matching_node(NULL, sunxi_timer_dt_ids);
	if (!node)
		panic("No sunxi timer node");

	timer_base = of_iomap(node, 0);
	if (!timer_base)
		panic("Can't map registers");

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0)
		panic("Can't parse IRQ");

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk))
		panic("Can't get timer clock");
	rate = clk_get_rate(clk);
#else
	timer_base = (void __iomem *)SUNXI_TIMER_VBASE;
	irq = SUNXI_IRQ_TIMER0;
#if defined(CONFIG_ARCH_SUN8IW3P1) && defined(CONFIG_FPGA_V4_PLATFORM)
	rate = 32000;	/* tests show the FPGA clock source is 32 kHz with prescale = 1 */
#else
	rate = 24000000;
#endif
#endif

	writel(rate / (TIMER_SCAL * HZ), timer_base + TIMER0_INTVAL_REG);

	/* set clock source to HOSC, 16 pre-division */
	val = readl(timer_base + TIMER0_CTL_REG);
	val &= ~(0x07 << 4);
	val &= ~(0x03 << 2);
	val |= (4 << 4) | (1 << 2);
	writel(val, timer_base + TIMER0_CTL_REG);

	/* set mode to auto reload */
	val = readl(timer_base + TIMER0_CTL_REG);
	writel(val | TIMER0_CTL_AUTORELOAD, timer_base + TIMER0_CTL_REG);

	ret = setup_irq(irq, &sunxi_timer_irq);
	if (ret)
		pr_warn("failed to setup irq %d\n", irq);

	/* Enable timer0 interrupt */
	val = readl(timer_base + TIMER_CTL_REG);
	writel(val | TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG);

	sunxi_clockevent.mult = div_sc(rate / TIMER_SCAL, NSEC_PER_SEC,
				       sunxi_clockevent.shift);
	sunxi_clockevent.max_delta_ns = clockevent_delta2ns(0x7fffffff,
							    &sunxi_clockevent);
	sunxi_clockevent.min_delta_ns = clockevent_delta2ns(0x10,
							    &sunxi_clockevent);
	sunxi_clockevent.cpumask = cpumask_of(0);

	clockevents_register_device(&sunxi_clockevent);
}
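/*
 * The TIMER0_CTL_REG update above packs two fields: a prescaler select in
 * bits [6:4] (value 4 selects /16) and a clock-source select in bits [3:2]
 * (value 1 selects HOSC). The same clear-then-set pattern in isolation
 * (field positions as used above; the starting value is made up):
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t val = 0xffffffff;	/* pretend register readback */

	val &= ~(0x07u << 4);		/* clear prescale field, bits 6:4 */
	val &= ~(0x03u << 2);		/* clear clk-src field, bits 3:2 */
	val |= (4u << 4) | (1u << 2);	/* prescale = /16, source = HOSC */

	printf("0x%08x\n", val);	/* prints 0xffffffc7: prescale=4, src=1 */
	return 0;
}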
static int dib7090_fe_cfg_get(struct aml_dvb *advb, struct platform_device *pdev,
			      int id, struct nim7090_adapter_state **pcfg)
{
	struct resource *res;
	char buf[32];
	int ret = 0;
	struct nim7090_adapter_state *cfg;

	cfg = kzalloc(sizeof(struct nim7090_adapter_state), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	cfg->i2c_id = fe_i2c;
	if (cfg->i2c_id == -1) {
		snprintf(buf, sizeof(buf), "frontend%d_i2c", id);
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, buf);
		if (!res) {
			pr_error("cannot get resource \"%s\"\n", buf);
			ret = -EINVAL;
			goto err_resource;
		}
		cfg->i2c_id = res->start;
		fe_i2c = cfg->i2c_id;
	}

	cfg->demod_addr = fe_demod_addr;
	if (cfg->demod_addr == -1) {
		snprintf(buf, sizeof(buf), "frontend%d_demod_addr", id);
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, buf);
		if (!res) {
			pr_error("cannot get resource \"%s\"\n", buf);
			ret = -EINVAL;
			goto err_resource;
		}
		cfg->demod_addr = res->start;
		fe_demod_addr = cfg->demod_addr;
	}

	cfg->reset_pin = fe_demod_rst_pin;
	if (cfg->reset_pin == -1) {
		snprintf(buf, sizeof(buf), "frontend%d_reset_pin", id);
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, buf);
		if (!res) {
			pr_warn("cannot get resource \"%s\"\n", buf);
			cfg->reset_pin = 0;
		} else {
			cfg->reset_pin = res->start;
		}
		fe_demod_rst_pin = cfg->reset_pin;
	}

	cfg->reset_val_en = fe_demod_rst_val_en;
	if (cfg->reset_val_en == -1) {
		snprintf(buf, sizeof(buf), "frontend%d_reset_value_enable", id);
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, buf);
		if (!res) {
			pr_warn("cannot get resource \"%s\"\n", buf);
		} else {
			cfg->reset_val_en = res->start;
		}
		fe_demod_rst_val_en = cfg->reset_val_en;
	}

	cfg->power_pin = fe_demod_pwr_pin;
	if (cfg->power_pin == -1) {
		snprintf(buf, sizeof(buf), "frontend%d_power_pin", id);
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, buf);
		if (!res) {
			pr_warn("cannot get resource \"%s\"\n", buf);
			cfg->power_pin = 0;
		} else {
			cfg->power_pin = res->start;
		}
		fe_demod_pwr_pin = cfg->power_pin;
	}

	cfg->power_val_en = fe_demod_pwr_val_en;
	if (cfg->power_val_en == -1) {
		snprintf(buf, sizeof(buf), "frontend%d_power_value_enable", id);
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, buf);
		if (!res) {
			pr_warn("cannot get resource \"%s\"\n", buf);
		} else {
			cfg->power_val_en = res->start;
		}
		fe_demod_pwr_val_en = cfg->power_val_en;
	}

	*pcfg = cfg;
	return 0;

err_resource:
	kfree(cfg);
	return ret;
}
static irqreturn_t max77849_irq_thread(int irq, void *data)
{
	struct max77849_dev *max77849 = data;
	u8 irq_reg[MAX77849_IRQ_GROUP_NR] = {0};
	u8 tmp_irq_reg[MAX77849_IRQ_GROUP_NR] = {};
	u8 irq_src;
	int ret;
	int i;

	/* INTMASK1 3:ADC1K 0:ADC */
	/* INTMASK2 4:VBVolt 0:Chgtype */
	max77849_write_reg(max77849->muic, MAX77849_MUIC_REG_INTMASK1, 0x09);
	max77849_write_reg(max77849->muic, MAX77849_MUIC_REG_INTMASK2, 0x11);

clear_retry:
	ret = max77849_read_reg(max77849->i2c, MAX77849_PMIC_REG_INTSRC,
				&irq_src);
	if (ret < 0) {
		dev_err(max77849->dev, "Failed to read interrupt source: %d\n",
			ret);
		return IRQ_NONE;
	}

	pr_info("%s: interrupt source(0x%02x)\n", __func__, irq_src);

	if (irq_src & MAX77849_IRQSRC_CHG) {
		/* CHG_INT */
		ret = max77849_read_reg(max77849->i2c, MAX77849_CHG_REG_CHG_INT,
					&irq_reg[CHG_INT]);
		pr_info("%s: charger interrupt(0x%02x)\n",
			__func__, irq_reg[CHG_INT]);
		/* Mask chgin to prevent an endless stream of chgin interrupts;
		 * chgin is unmasked again in the chgin ISR.
		 */
		if (irq_reg[CHG_INT] &
		    max77849_irqs[MAX77849_CHG_IRQ_CHGIN_I].mask) {
			u8 reg_data;

			max77849_read_reg(max77849->i2c,
					  MAX77849_CHG_REG_CHG_INT_MASK,
					  &reg_data);
			reg_data |= (1 << 6);
			max77849_write_reg(max77849->i2c,
					   MAX77849_CHG_REG_CHG_INT_MASK,
					   reg_data);
		}
	}

	if (irq_src & MAX77849_IRQSRC_TOP) {
		/* TOPSYS_INT */
		ret = max77849_read_reg(max77849->i2c,
					MAX77849_PMIC_REG_TOPSYS_INT,
					&irq_reg[TOPSYS_INT]);
		pr_info("%s: topsys interrupt(0x%02x)\n",
			__func__, irq_reg[TOPSYS_INT]);
	}

	if (irq_src & MAX77849_IRQSRC_MUIC) {
		/* MUIC INT1 ~ INT3 */
		ret = max77849_bulk_read(max77849->muic, MAX77849_MUIC_REG_INT1,
					 MAX77849_NUM_IRQ_MUIC_REGS,
					 &tmp_irq_reg[MUIC_INT1]);

		/* OR the temporary irq registers into irq_reg in case this
		 * is a retry pass.
		 */
		for (i = MUIC_INT1; i < MAX77849_IRQ_GROUP_NR; i++)
			irq_reg[i] |= tmp_irq_reg[i];

		pr_warn("%s: muic interrupt(0x%02x, 0x%02x, 0x%02x)\n",
			__func__, irq_reg[MUIC_INT1], irq_reg[MUIC_INT2],
			irq_reg[MUIC_INT3]);
	}

	pr_debug("%s: irq gpio post-state(0x%02x)\n", __func__,
		 gpio_get_value(max77849->irq_gpio));

	if (gpio_get_value(max77849->irq_gpio) == 0) {
		pr_warn("%s: irq_gpio is not High!\n", __func__);
		goto clear_retry;
	}

#if 0
	/* Apply masking */
	for (i = 0; i < MAX77849_IRQ_GROUP_NR; i++) {
		if (i >= MUIC_INT1 && i <= MUIC_INT3)
			irq_reg[i] &= max77849->irq_masks_cur[i];
		else
			irq_reg[i] &= ~max77849->irq_masks_cur[i];
	}
#endif

	/* Report */
	for (i = 0; i < MAX77849_IRQ_NR; i++) {
		if (irq_reg[max77849_irqs[i].group] & max77849_irqs[i].mask)
			handle_nested_irq(max77849->irq_base + i);
	}

	return IRQ_HANDLED;
}
static void jffs2_erase_block(struct jffs2_sb_info *c,
			      struct jffs2_eraseblock *jeb)
{
	int ret;
	uint32_t bad_offset;
#ifdef __ECOS
	ret = jffs2_flash_erase(c, jeb);
	if (!ret) {
		jffs2_erase_succeeded(c, jeb);
		return;
	}
	bad_offset = jeb->offset;
#else /* Linux */
	struct erase_info *instr;

	jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n",
		  __func__,
		  jeb->offset, jeb->offset, jeb->offset + c->sector_size);
	instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct),
			GFP_KERNEL);
	if (!instr) {
		pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
		mutex_lock(&c->erase_free_sem);
		spin_lock(&c->erase_completion_lock);
		list_move(&jeb->list, &c->erase_pending_list);
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		mutex_unlock(&c->erase_free_sem);
		return;
	}

	memset(instr, 0, sizeof(*instr));

	instr->mtd = c->mtd;
	instr->addr = jeb->offset;
	instr->len = c->sector_size;
	instr->callback = jffs2_erase_callback;
	instr->priv = (unsigned long)(&instr[1]);

	((struct erase_priv_struct *)instr->priv)->jeb = jeb;
	((struct erase_priv_struct *)instr->priv)->c = c;

	ret = mtd_erase(c->mtd, instr);
	if (!ret)
		return;

	bad_offset = instr->fail_addr;
	kfree(instr);
#endif /* __ECOS */

	if (ret == -ENOMEM || ret == -EAGAIN) {
		/* Erase failed immediately. Refile it on the list */
		jffs2_dbg(1, "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n",
			  jeb->offset, ret);
		mutex_lock(&c->erase_free_sem);
		spin_lock(&c->erase_completion_lock);
		list_move(&jeb->list, &c->erase_pending_list);
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		mutex_unlock(&c->erase_free_sem);
		return;
	}

	if (ret == -EROFS)
		pr_warn("Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n",
			jeb->offset);
	else
		pr_warn("Erase at 0x%08x failed immediately: errno %d\n",
			jeb->offset, ret);

	jffs2_erase_failed(c, jeb, bad_offset);
}
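/*
 * jffs2_erase_block() above co-allocates its private data in the same
 * kmalloc as the erase_info and points instr->priv at &instr[1]. The
 * header-plus-trailer allocation pattern in plain C (the struct names here
 * are illustrative stand-ins, not the kernel types):
 */
#include <stdio.h>
#include <stdlib.h>

struct hdr { unsigned long priv; };
struct trailer { int jeb_id; };

int main(void)
{
	/* one allocation; the trailer lives directly after the header */
	struct hdr *h = malloc(sizeof(*h) + sizeof(struct trailer));

	if (!h)
		return 1;

	h->priv = (unsigned long)&h[1];
	((struct trailer *)h->priv)->jeb_id = 42;

	printf("%d\n", ((struct trailer *)h->priv)->jeb_id);
	free(h);	/* one free releases both */
	return 0;
}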
static int au_cmoo(struct dentry *dentry)
{
	int err, cmoo;
	unsigned int udba;
	struct path h_path;
	struct au_pin pin;
	struct au_cp_generic cpg = {
		.dentry	= dentry,
		.bdst	= -1,
		.bsrc	= -1,
		.len	= -1,
		.pin	= &pin,
		.flags	= AuCpup_DTIME | AuCpup_HOPEN
	};
	struct inode *inode, *delegated;
	struct super_block *sb;
	struct au_sbinfo *sbinfo;
	struct au_fhsm *fhsm;
	pid_t pid;
	struct au_branch *br;
	struct dentry *parent;
	struct au_hinode *hdir;

	DiMustWriteLock(dentry);
	inode = dentry->d_inode;
	IiMustWriteLock(inode);

	err = 0;
	if (IS_ROOT(dentry))
		goto out;
	cpg.bsrc = au_dbstart(dentry);
	if (!cpg.bsrc)
		goto out;

	sb = dentry->d_sb;
	sbinfo = au_sbi(sb);
	fhsm = &sbinfo->si_fhsm;
	pid = au_fhsm_pid(fhsm);
	if (pid && (current->pid == pid || current->real_parent->pid == pid))
		goto out;

	br = au_sbr(sb, cpg.bsrc);
	cmoo = au_br_cmoo(br->br_perm);
	if (!cmoo)
		goto out;
	if (!S_ISREG(inode->i_mode))
		cmoo &= AuBrAttr_COO_ALL;
	if (!cmoo)
		goto out;

	parent = dget_parent(dentry);
	di_write_lock_parent(parent);
	err = au_wbr_do_copyup_bu(dentry, cpg.bsrc - 1);
	cpg.bdst = err;
	if (unlikely(err < 0)) {
		err = 0;	/* there is no upper writable branch */
		goto out_dgrade;
	}
	AuDbg("bsrc %d, bdst %d\n", cpg.bsrc, cpg.bdst);

	/* do not respect the coo attrib for the target branch */
	err = au_cpup_dirs(dentry, cpg.bdst);
	if (unlikely(err))
		goto out_dgrade;

	di_downgrade_lock(parent, AuLock_IR);
	udba = au_opt_udba(sb);
	err = au_pin(&pin, dentry, cpg.bdst, udba,
		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
	if (unlikely(err))
		goto out_parent;

	err = au_sio_cpup_simple(&cpg);
	au_unpin(&pin);
	if (unlikely(err))
		goto out_parent;
	if (!(cmoo & AuBrWAttr_MOO))
		goto out_parent; /* success */

	err = au_pin(&pin, dentry, cpg.bsrc, udba,
		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
	if (unlikely(err))
		goto out_parent;

	h_path.mnt = au_br_mnt(br);
	h_path.dentry = au_h_dptr(dentry, cpg.bsrc);
	hdir = au_hi(parent->d_inode, cpg.bsrc);
	delegated = NULL;
	err = vfsub_unlink(hdir->hi_inode, &h_path, &delegated, /*force*/1);
	au_unpin(&pin);
	/* todo: keep h_dentry or not? */
	if (unlikely(err == -EWOULDBLOCK)) {
		pr_warn("cannot retry for NFSv4 delegation"
			" for an internal unlink\n");
		iput(delegated);
	}
	if (unlikely(err)) {
		pr_err("unlink %pd after coo failed (%d), ignored\n",
		       dentry, err);
		err = 0;
	}
	goto out_parent; /* success */

out_dgrade:
	di_downgrade_lock(parent, AuLock_IR);
out_parent:
	di_read_unlock(parent, AuLock_IR);
	dput(parent);
out:
	AuTraceErr(err);
	return err;
}

int au_do_open(struct file *file, int (*open)(struct file *file, int flags),
	       struct au_fidir *fidir)
{
	int err;
	struct dentry *dentry;
	struct au_finfo *finfo;

	err = au_finfo_init(file, fidir);
	if (unlikely(err))
		goto out;

	dentry = file->f_path.dentry;
	di_write_lock_child(dentry);
	err = au_cmoo(dentry);
	di_downgrade_lock(dentry, AuLock_IR);
	if (!err)
		err = open(file, vfsub_file_flags(file));
	di_read_unlock(dentry, AuLock_IR);

	finfo = au_fi(file);
	if (!err) {
		finfo->fi_file = file;
		au_sphl_add(&finfo->fi_hlist,
			    &au_sbi(file->f_path.dentry->d_sb)->si_files);
	}
	fi_write_unlock(file);
	if (unlikely(err)) {
		finfo->fi_hdir = NULL;
		au_finfo_fin(file);
	}

out:
	return err;
}
/*
 * Check if the die sensor is cooling down. If it's higher than
 * t_hot since the last throttle then throttle it again.
 * OMAP junction temperature could stay for a long time in an
 * unacceptable temperature range. The idea here is to check after
 * t_hot->throttle the system really came below t_hot else re-throttle
 * and keep doing till it's under t_hot temp range.
 */
static void throttle_delayed_work_fn(struct work_struct *work)
{
	int curr;
	struct omap_temp_sensor *temp_sensor =
				container_of(work, struct omap_temp_sensor,
					     throttle_work.work);
	curr = omap_read_current_temp(temp_sensor);

#ifdef CONFIG_OMAP_TEMP_CONTROL
	if (curr >= temp_limit || curr < 0) {
#else
	if (curr >= BGAP_THRESHOLD_T_HOT || curr < 0) {
#endif
		pr_warn("%s: OMAP temp read %d exceeds the threshold\n",
			__func__, curr);
		omap_thermal_throttle();
		schedule_delayed_work(&temp_sensor->throttle_work,
			msecs_to_jiffies(THROTTLE_DELAY_MS));
	} else {
		schedule_delayed_work(&temp_sensor->throttle_work,
			msecs_to_jiffies(THROTTLE_DELAY_MS));
	}
}

static irqreturn_t omap_tshut_irq_handler(int irq, void *data)
{
	struct omap_temp_sensor *temp_sensor = (struct omap_temp_sensor *)data;

	/* Need to handle thermal mgmt in bootloader
	 * to avoid restart again at kernel level
	 */
	if (temp_sensor->is_efuse_valid) {
		pr_emerg("%s: Thermal shutdown reached rebooting device\n",
			 __func__);
		kernel_restart(NULL);
	} else {
		pr_err("%s:Invalid EFUSE, Non-trimmed BGAP\n", __func__);
	}

	return IRQ_HANDLED;
}

static irqreturn_t omap_talert_irq_handler(int irq, void *data)
{
	struct omap_temp_sensor *temp_sensor = (struct omap_temp_sensor *)data;
	int t_hot, t_cold, temp_offset;

	t_hot = omap_temp_sensor_readl(temp_sensor, BGAP_STATUS_OFFSET)
	    & OMAP4_HOT_FLAG_MASK;
	t_cold = omap_temp_sensor_readl(temp_sensor, BGAP_STATUS_OFFSET)
	    & OMAP4_COLD_FLAG_MASK;
	temp_offset = omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);

	if (t_hot) {
		omap_thermal_throttle();
		schedule_delayed_work(&temp_sensor->throttle_work,
			msecs_to_jiffies(THROTTLE_DELAY_MS));
		temp_offset &= ~(OMAP4_MASK_HOT_MASK);
		temp_offset |= OMAP4_MASK_COLD_MASK;
	} else if (t_cold) {
		cancel_delayed_work_sync(&temp_sensor->throttle_work);
		omap_thermal_unthrottle();
		temp_offset &= ~(OMAP4_MASK_COLD_MASK);
		temp_offset |= OMAP4_MASK_HOT_MASK;
	}

	omap_temp_sensor_writel(temp_sensor, temp_offset, BGAP_CTRL_OFFSET);

	return IRQ_HANDLED;
}

static int __devinit omap_temp_sensor_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_temp_sensor_pdata *pdata = pdev->dev.platform_data;
	struct omap_temp_sensor *temp_sensor;
	struct resource *mem;
	int ret = 0, val;

	if (!pdata) {
		dev_err(dev, "%s: platform data missing\n", __func__);
		return -EINVAL;
	}

	temp_sensor = kzalloc(sizeof(struct omap_temp_sensor), GFP_KERNEL);
	if (!temp_sensor)
		return -ENOMEM;

	spin_lock_init(&temp_sensor->lock);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "%s:no mem resource\n", __func__);
		ret = -EINVAL;
		goto plat_res_err;
	}

	temp_sensor->irq = platform_get_irq_byname(pdev, "thermal_alert");
	if (temp_sensor->irq < 0) {
		dev_err(dev, "%s:Cannot get thermal alert irq\n", __func__);
		ret = -EINVAL;
		goto get_irq_err;
	}

	ret = gpio_request_one(OMAP_TSHUT_GPIO, GPIOF_DIR_IN,
			       "thermal_shutdown");
	if (ret) {
		dev_err(dev, "%s: Could not get tshut_gpio\n", __func__);
		goto tshut_gpio_req_err;
	}

	temp_sensor->tshut_irq = gpio_to_irq(OMAP_TSHUT_GPIO);
	if (temp_sensor->tshut_irq < 0) {
		dev_err(dev, "%s:Cannot get thermal shutdown irq\n", __func__);
		ret = -EINVAL;
		goto get_tshut_irq_err;
	}

	temp_sensor->phy_base = pdata->offset;
	temp_sensor->pdev = pdev;
	temp_sensor->dev = dev;

	pm_runtime_enable(dev);
	pm_runtime_irq_safe(dev);

	/*
	 * check if the efuse has a non-zero value if not
	 * it is an untrimmed sample and the temperatures
	 * may not be accurate
	 */
	if (omap_readl(OMAP4_CTRL_MODULE_CORE +
		       OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_BGAP))
		temp_sensor->is_efuse_valid = 1;

	temp_sensor->clock = clk_get(&temp_sensor->pdev->dev, "fck");
	if (IS_ERR(temp_sensor->clock)) {
		ret = PTR_ERR(temp_sensor->clock);
		pr_err("%s:Unable to get fclk: %d\n", __func__, ret);
		ret = -EINVAL;
		goto clk_get_err;
	}

	/* Init delayed work for throttle decision */
	INIT_DELAYED_WORK(&temp_sensor->throttle_work,
			  throttle_delayed_work_fn);

	platform_set_drvdata(pdev, temp_sensor);

	ret = omap_temp_sensor_enable(temp_sensor);
	if (ret) {
		dev_err(dev, "%s:Cannot enable temp sensor\n", __func__);
		goto sensor_enable_err;
	}

	omap_enable_continuous_mode(temp_sensor);
	omap_configure_temp_sensor_thresholds(temp_sensor);
	/* 1 ms */
	omap_configure_temp_sensor_counter(temp_sensor, 1);

	/* Wait till the first conversion is done wait for at least 1ms */
	mdelay(2);

	/* Read the temperature once due to hw issue */
	omap_read_current_temp(temp_sensor);

	/* Set 2 seconds time as default counter */
	omap_configure_temp_sensor_counter(temp_sensor,
					   temp_sensor->clk_rate * 2);

	ret = request_threaded_irq(temp_sensor->irq, NULL,
				   omap_talert_irq_handler,
				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				   "temp_sensor", (void *)temp_sensor);
	if (ret) {
		dev_err(dev, "Request threaded irq failed.\n");
		goto req_irq_err;
	}

	ret = request_threaded_irq(temp_sensor->tshut_irq, NULL,
				   omap_tshut_irq_handler,
				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				   "tshut", (void *)temp_sensor);
	if (ret) {
		dev_err(dev, "Request threaded irq failed for TSHUT.\n");
		goto tshut_irq_req_err;
	}

	ret = sysfs_create_group(&pdev->dev.kobj, &omap_temp_sensor_group);
	if (ret) {
		dev_err(&pdev->dev, "could not create sysfs files\n");
		goto sysfs_create_err;
	}

	/* unmask the T_COLD and unmask T_HOT at init */
	val = omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);
	val |= OMAP4_MASK_COLD_MASK;
	val |= OMAP4_MASK_HOT_MASK;
	omap_temp_sensor_writel(temp_sensor, val, BGAP_CTRL_OFFSET);

	dev_info(dev, "%s probed\n", pdata->name);

	temp_sensor_pm = temp_sensor;

#ifdef CONFIG_OMAP_TEMP_CONTROL
	ctrl_sensor = temp_sensor;
	tempcontrol_registerlimit(temp_limit);
#endif

	return 0;

sysfs_create_err:
	free_irq(temp_sensor->tshut_irq, temp_sensor);
	cancel_delayed_work_sync(&temp_sensor->throttle_work);
tshut_irq_req_err:
	free_irq(temp_sensor->irq, temp_sensor);
req_irq_err:
	platform_set_drvdata(pdev, NULL);
	omap_temp_sensor_disable(temp_sensor);
sensor_enable_err:
	clk_put(temp_sensor->clock);
clk_get_err:
	pm_runtime_disable(dev);
get_tshut_irq_err:
	gpio_free(OMAP_TSHUT_GPIO);
tshut_gpio_req_err:
get_irq_err:
plat_res_err:
	kfree(temp_sensor);
	return ret;
}

static int __devexit omap_temp_sensor_remove(struct platform_device *pdev)
{
	struct omap_temp_sensor *temp_sensor = platform_get_drvdata(pdev);

	sysfs_remove_group(&pdev->dev.kobj, &omap_temp_sensor_group);
	cancel_delayed_work_sync(&temp_sensor->throttle_work);
	omap_temp_sensor_disable(temp_sensor);
	clk_put(temp_sensor->clock);
	platform_set_drvdata(pdev, NULL);
	if (temp_sensor->irq)
		free_irq(temp_sensor->irq, temp_sensor);
	if (temp_sensor->tshut_irq)
		free_irq(temp_sensor->tshut_irq, temp_sensor);
	kfree(temp_sensor);

	return 0;
}

#ifdef CONFIG_PM
static void omap_temp_sensor_save_ctxt(struct omap_temp_sensor *temp_sensor)
{
	temp_sensor_context.temp_sensor_ctrl =
	    omap_temp_sensor_readl(temp_sensor, TEMP_SENSOR_CTRL_OFFSET);
	temp_sensor_context.bg_ctrl =
	    omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);
	temp_sensor_context.bg_counter =
	    omap_temp_sensor_readl(temp_sensor, BGAP_COUNTER_OFFSET);
	temp_sensor_context.bg_threshold =
	    omap_temp_sensor_readl(temp_sensor, BGAP_THRESHOLD_OFFSET);
	temp_sensor_context.temp_sensor_tshut_threshold =
	    omap_temp_sensor_readl(temp_sensor, BGAP_TSHUT_OFFSET);
}
static int max14577_muic_handle_attach(struct max14577_muic_data *muic_data,
				       enum muic_attached_dev new_dev)
{
	int ret = 0;

	if (new_dev == muic_data->attached_dev) {
		pr_info("%s:%s Duplicated(%d), just ignore\n", MUIC_DEV_NAME,
			__func__, muic_data->attached_dev);
		return ret;
	}

	ret = max14577_muic_logically_detach(muic_data, new_dev);
	if (ret)
		pr_warn("%s:%s fail to logically detach(%d)\n", MUIC_DEV_NAME,
			__func__, ret);

	if (muic_data->mfd_pdata->set_gpio_pogo_cb) {
		/* VBATT <-> VBUS voltage switching, according to new_dev. */
		ret = muic_data->mfd_pdata->set_gpio_pogo_cb(new_dev);
		if (ret)
			pr_warn("%s:%s fail to VBATT <-> VBUS voltage switching(%d)\n",
				MUIC_DEV_NAME, __func__, ret);
	}

	switch (new_dev) {
	case ATTACHED_DEV_JIG_UART_OFF_MUIC:
		ret = write_vps_regs(muic_data, new_dev);
		muic_data->attached_dev = new_dev;
		max14577_muic_attach_callback_jig(muic_data);
		break;
	case ATTACHED_DEV_JIG_UART_OFF_VB_MUIC:
	case ATTACHED_DEV_TA_MUIC:
#if defined(CONFIG_MUIC_SUPPORT_POGO_TA)
	case ATTACHED_DEV_POGO_TA_MUIC:
#endif /* CONFIG_MUIC_SUPPORT_POGO_TA */
		ret = write_vps_regs(muic_data, new_dev);
		muic_data->attached_dev = new_dev;
		max14577_muic_attach_callback_jig(muic_data);
		max14577_muic_attach_callback_chg(muic_data);
		break;
	case ATTACHED_DEV_JIG_USB_ON_MUIC:
	case ATTACHED_DEV_JIG_USB_OFF_MUIC:
	case ATTACHED_DEV_USB_MUIC:
	case ATTACHED_DEV_CDP_MUIC:
#if defined(CONFIG_MUIC_SUPPORT_POGO_USB)
	case ATTACHED_DEV_POGO_USB_MUIC:
#endif /* CONFIG_MUIC_SUPPORT_POGO_USB */
		ret = write_vps_regs(muic_data, new_dev);
		muic_data->attached_dev = new_dev;
		max14577_muic_attach_callback_usb(muic_data);
		max14577_muic_attach_callback_chg(muic_data);
		break;
#if defined(CONFIG_SEC_FACTORY_MODE)
	case ATTACHED_DEV_JIG_UART_ON_MUIC:
		ret = write_vps_regs(muic_data, new_dev);
		muic_data->attached_dev = new_dev;
		max14577_muic_attach_callback_dock(muic_data,
						   MUIC_DOCK_AUDIODOCK);
		break;
#endif /* CONFIG_SEC_FACTORY_MODE */
#if defined(CONFIG_MUIC_SUPPORT_FACTORY_BUTTON)
	case ATTACHED_DEV_FACTORY_BUTTON:
		ret = write_vps_regs(muic_data, new_dev);
		muic_data->attached_dev = new_dev;
		max14577_muic_attach_callback_dock(muic_data,
						   MUIC_DOCK_FACTORY);
		break;
#endif /* CONFIG_MUIC_SUPPORT_FACTORY_BUTTON */
	default:
		pr_warn("%s:%s unsupported dev(%d)\n", MUIC_DEV_NAME, __func__,
			new_dev);
		break;
	}

	return ret;
}
static int convert_log(struct mc_info *mi) { struct mcinfo_common *mic; struct mcinfo_global *mc_global; struct mcinfo_bank *mc_bank; struct xen_mce m; uint32_t i; mic = NULL; x86_mcinfo_lookup(&mic, mi, MC_TYPE_GLOBAL); if (unlikely(!mic)) { pr_warn("Failed to find global error info\n"); return -ENODEV; } memset(&m, 0, sizeof(struct xen_mce)); mc_global = (struct mcinfo_global *)mic; m.mcgstatus = mc_global->mc_gstatus; m.apicid = mc_global->mc_apicid; for (i = 0; i < ncpus; i++) if (g_physinfo[i].mc_apicid == m.apicid) break; if (unlikely(i == ncpus)) { pr_warn("Failed to match cpu with apicid %d\n", m.apicid); return -ENODEV; } m.socketid = g_physinfo[i].mc_chipid; m.cpu = m.extcpu = g_physinfo[i].mc_cpunr; m.cpuvendor = (__u8)g_physinfo[i].mc_vendor; m.mcgcap = g_physinfo[i].mc_msrvalues[__MC_MSR_MCGCAP].value; mic = NULL; x86_mcinfo_lookup(&mic, mi, MC_TYPE_BANK); if (unlikely(!mic)) { pr_warn("Fail to find bank error info\n"); return -ENODEV; } do { if ((!mic) || (mic->size == 0) || (mic->type != MC_TYPE_GLOBAL && mic->type != MC_TYPE_BANK && mic->type != MC_TYPE_EXTENDED && mic->type != MC_TYPE_RECOVERY)) break; if (mic->type == MC_TYPE_BANK) { mc_bank = (struct mcinfo_bank *)mic; m.misc = mc_bank->mc_misc; m.status = mc_bank->mc_status; m.addr = mc_bank->mc_addr; m.tsc = mc_bank->mc_tsc; m.bank = mc_bank->mc_bank; m.finished = 1; /*log this record*/ xen_mce_log(&m); } mic = x86_mcinfo_next(mic); } while (1); return 0; }
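/*
 * Illustration of the record traversal used above: x86_mcinfo_lookup() finds
 * the first record of a given type and x86_mcinfo_next() steps through the
 * variable-sized chain. A hypothetical helper that counts the bank records,
 * reusing convert_log()'s termination conditions; not part of the original.
 */
static unsigned int count_bank_records(struct mc_info *mi)
{
	struct mcinfo_common *mic = NULL;
	unsigned int n = 0;

	x86_mcinfo_lookup(&mic, mi, MC_TYPE_BANK);
	while (mic && mic->size &&
	       (mic->type == MC_TYPE_GLOBAL || mic->type == MC_TYPE_BANK ||
		mic->type == MC_TYPE_EXTENDED || mic->type == MC_TYPE_RECOVERY)) {
		if (mic->type == MC_TYPE_BANK)
			n++;
		mic = x86_mcinfo_next(mic);
	}
	return n;
}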
static void __init imx6q_clocks_init(struct device_node *ccm_node) { struct device_node *np; void __iomem *base; int i, irq; int ret; clk[dummy] = imx_clk_fixed("dummy", 0); clk[ckil] = imx_obtain_fixed_clock("ckil", 0); clk[ckih] = imx_obtain_fixed_clock("ckih1", 0); clk[osc] = imx_obtain_fixed_clock("osc", 0); np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop"); base = of_iomap(np, 0); WARN_ON(!base); /* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */ if (cpu_is_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) { post_div_table[1].div = 1; post_div_table[2].div = 1; video_div_table[1].div = 1; video_div_table[2].div = 1; } /* type name parent_name base div_mask */ clk[pll1_sys] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x7f); clk[pll2_bus] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", base + 0x30, 0x1); clk[pll3_usb_otg] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", base + 0x10, 0x3); clk[pll4_audio] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4_audio", "osc", base + 0x70, 0x7f); clk[pll5_video] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5_video", "osc", base + 0xa0, 0x7f); clk[pll6_enet] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6_enet", "osc", base + 0xe0, 0x3); clk[pll7_usb_host] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host", "osc", base + 0x20, 0x3); /* * Bit 20 is the reserved and read-only bit; we do this only to: * - do nothing for usbphy clk_enable/disable * - keep the refcount when doing usbphy clk_enable/disable; in that case, * the clk framework may need to enable/disable usbphy's parent */ clk[usbphy1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20); clk[usbphy2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20); /* * usbphy*_gate needs to be on after system boots up, and software * never needs to control it anymore. */ clk[usbphy1_gate] = imx_clk_gate("usbphy1_gate", "dummy", base + 0x10, 6); clk[usbphy2_gate] = imx_clk_gate("usbphy2_gate", "dummy", base + 0x20, 6); clk[sata_ref] = imx_clk_fixed_factor("sata_ref", "pll6_enet", 1, 5); clk[pcie_ref] = imx_clk_fixed_factor("pcie_ref", "pll6_enet", 1, 4); clk[sata_ref_100m] = imx_clk_gate("sata_ref_100m", "sata_ref", base + 0xe0, 20); clk[pcie_ref_125m] = imx_clk_gate("pcie_ref_125m", "pcie_ref", base + 0xe0, 19); clk[enet_ref] = clk_register_divider_table(NULL, "enet_ref", "pll6_enet", 0, base + 0xe0, 0, 2, 0, clk_enet_ref_table, &imx_ccm_lock); clk[lvds1_sel] = imx_clk_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels)); clk[lvds2_sel] = imx_clk_mux("lvds2_sel", base + 0x160, 5, 5, lvds_sels, ARRAY_SIZE(lvds_sels)); /* * lvds1_gate and lvds2_gate are pseudo-gates. Both can be * independently configured as clock inputs or outputs. We treat * the "output_enable" bit as a gate, even though it's really just * enabling clock output.
*/ clk[lvds1_gate] = imx_clk_gate("lvds1_gate", "dummy", base + 0x160, 10); clk[lvds2_gate] = imx_clk_gate("lvds2_gate", "dummy", base + 0x160, 11); /* name parent_name reg idx */ clk[pll2_pfd0_352m] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0); clk[pll2_pfd1_594m] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1); clk[pll2_pfd2_396m] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2); clk[pll3_pfd0_720m] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0); clk[pll3_pfd1_540m] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1); clk[pll3_pfd2_508m] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2); clk[pll3_pfd3_454m] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3); /* name parent_name mult div */ clk[pll2_198m] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2); clk[pll3_120m] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4); clk[pll3_80m] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); clk[pll3_60m] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); clk[twd] = imx_clk_fixed_factor("twd", "arm", 1, 2); clk[pll4_post_div] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock); clk[pll4_audio_div] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock); clk[pll5_post_div] = clk_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock); clk[pll5_video_div] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock); np = ccm_node; base = of_iomap(np, 0); WARN_ON(!base); imx6q_pm_set_ccm_base(base); /* name reg shift width parent_names num_parents */ clk[step] = imx_clk_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels)); clk[pll1_sw] = imx_clk_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels)); clk[periph_pre] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); clk[periph2_pre] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); clk[periph_clk2_sel] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); clk[axi_sel] = imx_clk_mux("axi_sel", base + 0x14, 6, 2, axi_sels, ARRAY_SIZE(axi_sels)); clk[esai_sel] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); clk[asrc_sel] = imx_clk_mux("asrc_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); clk[spdif_sel] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); clk[gpu2d_axi] = imx_clk_mux("gpu2d_axi", base + 0x18, 0, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels)); clk[gpu3d_axi] = imx_clk_mux("gpu3d_axi", base + 0x18, 1, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels)); clk[gpu2d_core_sel] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels)); clk[gpu3d_core_sel] = imx_clk_mux("gpu3d_core_sel", base + 0x18, 4, 2, gpu3d_core_sels, ARRAY_SIZE(gpu3d_core_sels)); clk[gpu3d_shader_sel] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels)); clk[ipu1_sel] = 
imx_clk_mux("ipu1_sel", base + 0x3c, 9, 2, ipu_sels, ARRAY_SIZE(ipu_sels)); clk[ipu2_sel] = imx_clk_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels)); clk[ldb_di0_sel] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT); clk[ldb_di1_sel] = imx_clk_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT); clk[ipu1_di0_pre_sel] = imx_clk_mux("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels)); clk[ipu1_di1_pre_sel] = imx_clk_mux("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels)); clk[ipu2_di0_pre_sel] = imx_clk_mux("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels)); clk[ipu2_di1_pre_sel] = imx_clk_mux("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels)); clk[ipu1_di0_sel] = imx_clk_mux("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels)); clk[ipu1_di1_sel] = imx_clk_mux("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels)); clk[ipu2_di0_sel] = imx_clk_mux("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels)); clk[ipu2_di1_sel] = imx_clk_mux("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels)); clk[hsi_tx_sel] = imx_clk_mux("hsi_tx_sel", base + 0x30, 28, 1, hsi_tx_sels, ARRAY_SIZE(hsi_tx_sels)); clk[pcie_axi_sel] = imx_clk_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels)); clk[ssi1_sel] = imx_clk_fixup_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); clk[ssi2_sel] = imx_clk_fixup_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); clk[ssi3_sel] = imx_clk_fixup_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); clk[usdhc1_sel] = imx_clk_fixup_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); clk[usdhc2_sel] = imx_clk_fixup_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); clk[usdhc3_sel] = imx_clk_fixup_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); clk[usdhc4_sel] = imx_clk_fixup_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); clk[enfc_sel] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels)); clk[emi_sel] = imx_clk_fixup_mux("emi_sel", base + 0x1c, 27, 2, emi_sels, ARRAY_SIZE(emi_sels), imx_cscmr1_fixup); clk[emi_slow_sel] = imx_clk_fixup_mux("emi_slow_sel", base + 0x1c, 29, 2, emi_slow_sels, ARRAY_SIZE(emi_slow_sels), imx_cscmr1_fixup); clk[vdo_axi_sel] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels)); clk[vpu_axi_sel] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels)); clk[cko1_sel] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels)); clk[cko2_sel] = imx_clk_mux("cko2_sel", base + 0x60, 16, 5, cko2_sels, ARRAY_SIZE(cko2_sels)); clk[cko] = imx_clk_mux("cko", base + 0x60, 8, 1, cko_sels, ARRAY_SIZE(cko_sels)); /* name reg shift width busy: reg, shift parent_names num_parents */ clk[periph] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels)); clk[periph2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, 
ARRAY_SIZE(periph2_sels)); /* name parent_name reg shift width */ clk[periph_clk2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3); clk[periph2_clk2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3); clk[ipg] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2); clk[ipg_per] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup); clk[esai_pred] = imx_clk_divider("esai_pred", "esai_sel", base + 0x28, 9, 3); clk[esai_podf] = imx_clk_divider("esai_podf", "esai_pred", base + 0x28, 25, 3); clk[asrc_pred] = imx_clk_divider("asrc_pred", "asrc_sel", base + 0x30, 12, 3); clk[asrc_podf] = imx_clk_divider("asrc_podf", "asrc_pred", base + 0x30, 9, 3); clk[spdif_pred] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3); clk[spdif_podf] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3); clk[can_root] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6); clk[ecspi_root] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6); clk[gpu2d_core_podf] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3); clk[gpu3d_core_podf] = imx_clk_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3); clk[gpu3d_shader] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3); clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3); clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3); clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); clk[ldb_di0_podf] = imx_clk_divider_flags("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1, 0); clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7); clk[ldb_di1_podf] = imx_clk_divider_flags("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1, 0); clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3); clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3); clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3); clk[ipu2_di1_pre] = imx_clk_divider("ipu2_di1_pre", "ipu2_di1_pre_sel", base + 0x38, 12, 3); clk[hsi_tx_podf] = imx_clk_divider("hsi_tx_podf", "hsi_tx_sel", base + 0x30, 29, 3); clk[ssi1_pred] = imx_clk_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3); clk[ssi1_podf] = imx_clk_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6); clk[ssi2_pred] = imx_clk_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3); clk[ssi2_podf] = imx_clk_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6); clk[ssi3_pred] = imx_clk_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3); clk[ssi3_podf] = imx_clk_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6); clk[uart_serial_podf] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6); clk[usdhc1_podf] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3); clk[usdhc2_podf] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3); clk[usdhc3_podf] = imx_clk_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3); clk[usdhc4_podf] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3); clk[enfc_pred] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3); clk[enfc_podf] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6); clk[emi_podf] = imx_clk_fixup_divider("emi_podf", "emi_sel", base + 0x1c, 20, 3, imx_cscmr1_fixup); clk[emi_slow_podf] = imx_clk_fixup_divider("emi_slow_podf", "emi_slow_sel", 
base + 0x1c, 23, 3, imx_cscmr1_fixup); clk[vpu_axi_podf] = imx_clk_divider("vpu_axi_podf", "vpu_axi_sel", base + 0x24, 25, 3); clk[cko1_podf] = imx_clk_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3); clk[cko2_podf] = imx_clk_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3); /* name parent_name reg shift width busy: reg, shift */ clk[axi] = imx_clk_busy_divider("axi", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0); clk[mmdc_ch0_axi_podf] = imx_clk_busy_divider("mmdc_ch0_axi_podf", "periph", base + 0x14, 19, 3, base + 0x48, 4); clk[mmdc_ch1_axi_podf] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2); clk[arm] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16); clk[ahb] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1); /* name parent_name reg shift */ clk[apbh_dma] = imx_clk_gate2("apbh_dma", "usdhc3", base + 0x68, 4); clk[asrc] = imx_clk_gate2("asrc", "asrc_podf", base + 0x68, 6); clk[can1_ipg] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14); clk[can1_serial] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16); clk[can2_ipg] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18); clk[can2_serial] = imx_clk_gate2("can2_serial", "can_root", base + 0x68, 20); clk[ecspi1] = imx_clk_gate2("ecspi1", "ecspi_root", base + 0x6c, 0); clk[ecspi2] = imx_clk_gate2("ecspi2", "ecspi_root", base + 0x6c, 2); clk[ecspi3] = imx_clk_gate2("ecspi3", "ecspi_root", base + 0x6c, 4); clk[ecspi4] = imx_clk_gate2("ecspi4", "ecspi_root", base + 0x6c, 6); clk[ecspi5] = imx_clk_gate2("ecspi5", "ecspi_root", base + 0x6c, 8); clk[enet] = imx_clk_gate2("enet", "ipg", base + 0x6c, 10); clk[esai] = imx_clk_gate2("esai", "esai_podf", base + 0x6c, 16); clk[gpt_ipg] = imx_clk_gate2("gpt_ipg", "ipg", base + 0x6c, 20); clk[gpt_ipg_per] = imx_clk_gate2("gpt_ipg_per", "ipg_per", base + 0x6c, 22); if (cpu_is_imx6dl()) /* * The multiplexer and divider of imx6q clock gpu3d_shader get * redefined/reused as gpu2d_core_sel and gpu2d_core_podf on imx6dl. 
*/ clk[gpu2d_core] = imx_clk_gate2("gpu2d_core", "gpu3d_shader", base + 0x6c, 24); else clk[gpu2d_core] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24); clk[gpu3d_core] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26); clk[hdmi_iahb] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0); clk[hdmi_isfr] = imx_clk_gate2("hdmi_isfr", "pll3_pfd1_540m", base + 0x70, 4); clk[i2c1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6); clk[i2c2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8); clk[i2c3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10); clk[iim] = imx_clk_gate2("iim", "ipg", base + 0x70, 12); clk[enfc] = imx_clk_gate2("enfc", "enfc_podf", base + 0x70, 14); clk[vdoa] = imx_clk_gate2("vdoa", "vdo_axi", base + 0x70, 26); clk[ipu1] = imx_clk_gate2("ipu1", "ipu1_podf", base + 0x74, 0); clk[ipu1_di0] = imx_clk_gate2("ipu1_di0", "ipu1_di0_sel", base + 0x74, 2); clk[ipu1_di1] = imx_clk_gate2("ipu1_di1", "ipu1_di1_sel", base + 0x74, 4); clk[ipu2] = imx_clk_gate2("ipu2", "ipu2_podf", base + 0x74, 6); clk[ipu2_di0] = imx_clk_gate2("ipu2_di0", "ipu2_di0_sel", base + 0x74, 8); clk[ldb_di0] = imx_clk_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12); clk[ldb_di1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14); clk[ipu2_di1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10); clk[hsi_tx] = imx_clk_gate2("hsi_tx", "hsi_tx_podf", base + 0x74, 16); if (cpu_is_imx6dl()) /* * The multiplexer and divider of the imx6q clock gpu2d get * redefined/reused as mlb_sys_sel and mlb_sys_clk_podf on imx6dl. */ clk[mlb] = imx_clk_gate2("mlb", "gpu2d_core_podf", base + 0x74, 18); else clk[mlb] = imx_clk_gate2("mlb", "axi", base + 0x74, 18); clk[mmdc_ch0_axi] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20); clk[mmdc_ch1_axi] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22); clk[ocram] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28); clk[openvg_axi] = imx_clk_gate2("openvg_axi", "axi", base + 0x74, 30); clk[pcie_axi] = imx_clk_gate2("pcie_axi", "pcie_axi_sel", base + 0x78, 0); clk[per1_bch] = imx_clk_gate2("per1_bch", "usdhc3", base + 0x78, 12); clk[pwm1] = imx_clk_gate2("pwm1", "ipg_per", base + 0x78, 16); clk[pwm2] = imx_clk_gate2("pwm2", "ipg_per", base + 0x78, 18); clk[pwm3] = imx_clk_gate2("pwm3", "ipg_per", base + 0x78, 20); clk[pwm4] = imx_clk_gate2("pwm4", "ipg_per", base + 0x78, 22); clk[gpmi_bch_apb] = imx_clk_gate2("gpmi_bch_apb", "usdhc3", base + 0x78, 24); clk[gpmi_bch] = imx_clk_gate2("gpmi_bch", "usdhc4", base + 0x78, 26); clk[gpmi_io] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28); clk[gpmi_apb] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30); clk[rom] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0); clk[sata] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4); clk[sdma] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6); clk[spba] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12); clk[spdif] = imx_clk_gate2("spdif", "spdif_podf", base + 0x7c, 14); clk[ssi1_ipg] = imx_clk_gate2("ssi1_ipg", "ipg", base + 0x7c, 18); clk[ssi2_ipg] = imx_clk_gate2("ssi2_ipg", "ipg", base + 0x7c, 20); clk[ssi3_ipg] = imx_clk_gate2("ssi3_ipg", "ipg", base + 0x7c, 22); clk[uart_ipg] = imx_clk_gate2("uart_ipg", "ipg", base + 0x7c, 24); clk[uart_serial] = imx_clk_gate2("uart_serial", "uart_serial_podf", base + 0x7c, 26); clk[usboh3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0); clk[usdhc1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2); clk[usdhc2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); 
clk[usdhc3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6); clk[usdhc4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8); clk[eim_slow] = imx_clk_gate2("eim_slow", "emi_slow_podf", base + 0x80, 10); clk[vdo_axi] = imx_clk_gate2("vdo_axi", "vdo_axi_sel", base + 0x80, 12); clk[vpu_axi] = imx_clk_gate2("vpu_axi", "vpu_axi_podf", base + 0x80, 14); clk[cko1] = imx_clk_gate("cko1", "cko1_podf", base + 0x60, 7); clk[cko2] = imx_clk_gate("cko2", "cko2_podf", base + 0x60, 24); for (i = 0; i < ARRAY_SIZE(clk); i++) if (IS_ERR(clk[i])) pr_err("i.MX6q clk %d: register failed with %ld\n", i, PTR_ERR(clk[i])); clk_data.clks = clk; clk_data.clk_num = ARRAY_SIZE(clk); of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0"); clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL); clk_register_clkdev(clk[ahb], "ahb", NULL); clk_register_clkdev(clk[cko1], "cko1", NULL); clk_register_clkdev(clk[arm], NULL, "cpu0"); clk_register_clkdev(clk[pll4_post_div], "pll4_post_div", NULL); clk_register_clkdev(clk[pll4_audio], "pll4_audio", NULL); if ((imx_get_soc_revision() != IMX_CHIP_REVISION_1_0) || cpu_is_imx6dl()) { clk_set_parent(clk[ldb_di0_sel], clk[pll5_video_div]); clk_set_parent(clk[ldb_di1_sel], clk[pll5_video_div]); } /* * The gpmi needs 100MHz frequency in the EDO/Sync mode, * We can not get the 100MHz from the pll2_pfd0_352m. * So choose pll2_pfd2_396m as enfc_sel's parent. */ clk_set_parent(clk[enfc_sel], clk[pll2_pfd2_396m]); for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) clk_prepare_enable(clk[clks_init_on[i]]); if (IS_ENABLED(CONFIG_USB_MXS_PHY)) { clk_prepare_enable(clk[usbphy1_gate]); clk_prepare_enable(clk[usbphy2_gate]); } /* * Let's initially set up CLKO with OSC24M, since this configuration * is widely used by imx6q board designs to clock audio codec. */ ret = clk_set_parent(clk[cko2_sel], clk[osc]); if (!ret) ret = clk_set_parent(clk[cko], clk[cko2]); if (ret) pr_warn("failed to set up CLKO: %d\n", ret); /* All existing boards with PCIe use LVDS1 */ if (IS_ENABLED(CONFIG_PCI_IMX6)) clk_set_parent(clk[lvds1_sel], clk[sata_ref]); /* Set initial power mode */ imx6q_set_lpm(WAIT_CLOCKED); np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); base = of_iomap(np, 0); WARN_ON(!base); irq = irq_of_parse_and_map(np, 0); mxc_timer_init(base, irq); }
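/*
 * Consumer view, for context: a device driver picks up one of the clocks
 * registered above through the common clock API, e.g. the GPT per clock bound
 * via clk_register_clkdev() to "imx-gpt.0". A minimal sketch (the function
 * name is illustrative, not from the original file):
 */
static int example_gpt_clock_on(struct device *dev)
{
	struct clk *per = clk_get(dev, "per");	/* con_id registered above */
	int ret;

	if (IS_ERR(per))
		return PTR_ERR(per);

	ret = clk_prepare_enable(per);	/* prepare + enable in one call */
	if (ret)
		clk_put(per);
	return ret;
}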
int mdp3_ctrl_init(struct msm_fb_data_type *mfd)
{
	struct device *dev = mfd->fbi->dev;
	struct msm_mdp_interface *mdp3_interface = &mfd->mdp;
	struct mdp3_session_data *mdp3_session = NULL;
	u32 intf_type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO;
	int rc;
	int splash_mismatch = 0;

	pr_debug("mdp3_ctrl_init\n");
	rc = mdp3_parse_dt_splash(mfd);
	if (rc)
		splash_mismatch = 1;

	mdp3_interface->on_fnc = mdp3_ctrl_on;
	mdp3_interface->off_fnc = mdp3_ctrl_off;
	mdp3_interface->do_histogram = NULL;
	mdp3_interface->cursor_update = NULL;
	mdp3_interface->dma_fnc = mdp3_ctrl_pan_display;
	mdp3_interface->ioctl_handler = mdp3_ctrl_ioctl_handler;
	mdp3_interface->kickoff_fnc = mdp3_ctrl_display_commit_kickoff;
	mdp3_interface->lut_update = mdp3_ctrl_lut_update;
	mdp3_interface->configure_panel = mdp3_update_panel_info;

	mdp3_session = kzalloc(sizeof(struct mdp3_session_data), GFP_KERNEL);
	if (!mdp3_session) {
		pr_err("fail to allocate mdp3 private data structure\n");
		return -ENOMEM;
	}
	mutex_init(&mdp3_session->lock);
	INIT_WORK(&mdp3_session->clk_off_work, mdp3_dispatch_clk_off);
	INIT_WORK(&mdp3_session->dma_done_work, mdp3_dispatch_dma_done);
	atomic_set(&mdp3_session->vsync_countdown, 0);
	mutex_init(&mdp3_session->histo_lock);
	mdp3_session->dma = mdp3_get_dma_pipe(MDP3_DMA_CAP_ALL);
	if (!mdp3_session->dma) {
		rc = -ENODEV;
		goto init_done;
	}

	rc = mdp3_dma_init(mdp3_session->dma);
	if (rc) {
		pr_err("fail to init dma\n");
		goto init_done;
	}

	intf_type = mdp3_ctrl_get_intf_type(mfd);
	mdp3_session->intf = mdp3_get_display_intf(intf_type);
	if (!mdp3_session->intf) {
		rc = -ENODEV;
		goto init_done;
	}
	rc = mdp3_intf_init(mdp3_session->intf);
	if (rc) {
		pr_err("fail to init interface\n");
		goto init_done;
	}

	mdp3_session->dma->output_config.out_sel = intf_type;
	mdp3_session->mfd = mfd;
	mdp3_session->panel = dev_get_platdata(&mfd->pdev->dev);
	mdp3_session->status = mdp3_session->intf->active;
	mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
	mdp3_bufq_init(&mdp3_session->bufq_in);
	mdp3_bufq_init(&mdp3_session->bufq_out);
	mdp3_session->histo_status = 0;
	mdp3_session->lut_sel = 0;
	BLOCKING_INIT_NOTIFIER_HEAD(&mdp3_session->notifier_head);

	init_timer(&mdp3_session->vsync_timer);
	mdp3_session->vsync_timer.function = mdp3_vsync_timer_func;
	mdp3_session->vsync_timer.data = (u32)mdp3_session;
	mdp3_session->vsync_period = 1000 / mfd->panel_info->mipi.frame_rate;
	mfd->mdp.private1 = mdp3_session;
	init_completion(&mdp3_session->dma_completion);
	if (intf_type != MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
		mdp3_session->wait_for_dma_done = mdp3_wait_for_dma_done;

	rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group);
	if (rc) {
		pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
		goto init_done;
	}

	mdp3_session->vsync_event_sd = sysfs_get_dirent(dev->kobj.sd,
							NULL, "vsync_event");
	if (!mdp3_session->vsync_event_sd) {
		pr_err("vsync_event sysfs lookup failed\n");
		rc = -ENODEV;
		goto init_done;
	}

	rc = mdp3_create_sysfs_link(dev);
	if (rc)
		pr_warn("problem creating link to mdp sysfs\n");

	kobject_uevent(&dev->kobj, KOBJ_ADD);
	pr_debug("vsync kobject_uevent(KOBJ_ADD)\n");

	if (mdp3_get_cont_spash_en()) {
		mdp3_session->clk_on = 1;
		mdp3_session->in_splash_screen = 1;
		mdp3_ctrl_notifier_register(mdp3_session,
			&mdp3_session->mfd->mdp_sync_pt_data.notifier);
	}

	if (splash_mismatch) {
		pr_err("splash memory mismatch, stop splash\n");
		mdp3_ctrl_off(mfd);
	}

	mdp3_session->vsync_before_commit = true;

init_done:
	if (IS_ERR_VALUE(rc))
		kfree(mdp3_session);

	return rc;
}
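/*
 * The BLOCKING_INIT_NOTIFIER_HEAD() above lets clients subscribe to session
 * events through the standard notifier API (the driver wraps this in
 * mdp3_ctrl_notifier_register()). A hypothetical listener, for illustration
 * only; the names below are not from the original driver.
 */
static int example_mdp3_event(struct notifier_block *nb, unsigned long event,
			      void *data)
{
	pr_debug("mdp3 session event %lu\n", event);
	return NOTIFY_OK;
}

static struct notifier_block example_mdp3_nb = {
	.notifier_call = example_mdp3_event,
};

/* blocking_notifier_chain_register(&mdp3_session->notifier_head, &example_mdp3_nb); */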
static ssize_t gic_read(struct file *filp, char __user *buffer,
			size_t count, loff_t *ppos)
{
	void __iomem *dist_base = gic_get_dist_base();
	char *p;
	ssize_t ret;
	size_t buf_len;
	u32 gic_irqs;
	u32 value;
	int i, len = 0;

	p = (char *)__get_free_pages(GFP_KERNEL, 0);
	if (!p)
		return -ENOMEM;
	buf_len = PAGE_SIZE - 1;

	if (gic_offset != -1) {
		value = readl_relaxed(dist_base + gic_offset);
		len += snprintf(p + len, buf_len - len,
				"offset[0x%x]: 0x%08x\n", gic_offset, value);
	} else {
		gic_irqs = readl_relaxed(dist_base + GIC_DIST_CTR) & 0x1f;
		gic_irqs = (gic_irqs + 1) * 32;
		if (gic_irqs > 1020)
			gic_irqs = 1020;
		value = readl_relaxed(dist_base + GIC_DIST_CTRL);
		len += snprintf(p + len, buf_len - len,
				"Dist Control Register: 0x%08x\n", value);
		for (i = 32; i < gic_irqs; i += 4) {
			value = readl_relaxed(dist_base + GIC_DIST_TARGET +
					      i * 4 / 4);
			len += snprintf(p + len, buf_len - len,
					"Target setting[%d]: 0x%08x\n",
					i / 4, value);
		}
		for (i = 32; i < gic_irqs; i += 32) {
			value = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET +
					      i * 4 / 32);
			len += snprintf(p + len, buf_len - len,
					"Enable setting[%d]: 0x%08x\n",
					i / 32, value);
		}
		for (i = 32; i < gic_irqs; i += 32) {
			value = readl_relaxed(dist_base + GIC_DIST_PENDING_SET +
					      i * 4 / 32);
			len += snprintf(p + len, buf_len - len,
					"Pending status[%d]: 0x%08x\n",
					i / 32, value);
		}
		for (i = 32; i < gic_irqs; i += 32) {
			value = readl_relaxed(dist_base + GIC_DIST_STATUS +
					      i * 4 / 32);
			len += snprintf(p + len, buf_len - len,
					"SPI status[%d]: 0x%08x\n",
					i / 32, value);
		}
	}
	if (len == buf_len)
		pr_warn("The buffer for dumping gic is full now!\n");

	ret = simple_read_from_buffer(buffer, count, ppos, p, len);
	free_pages((unsigned long)p, 0);
	if (gic_offset != -1 && !ret)
		gic_offset = -1;

	return ret;
}
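/*
 * gic_offset is consumed (and reset) by gic_read() above, so it is presumably
 * set from a companion write handler. A hypothetical sketch of one, assuming
 * the file accepts a hexadecimal register offset; not from the original code.
 */
static ssize_t gic_write(struct file *filp, const char __user *buffer,
			 size_t count, loff_t *ppos)
{
	char buf[16] = {0};
	int offset;

	if (count >= sizeof(buf))
		return -EINVAL;
	if (copy_from_user(buf, buffer, count))
		return -EFAULT;
	if (kstrtoint(buf, 16, &offset))
		return -EINVAL;
	gic_offset = offset;
	return count;
}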
static ssize_t do_test_write(struct file *file, const char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char cmdbuf[64] = {0};
	char irqname[16] = {0};
	int ret;
	int irq;
	int i;

	if (count >= sizeof(cmdbuf))
		count = sizeof(cmdbuf) - 1;
	if (copy_from_user(cmdbuf, user_buf, count)) {
		pr_warn("test_hi64xx_irq: user buffer is not completely copied\n");
		return -EFAULT;
	}

	ret = kstrtoint(cmdbuf, 10, &irq);
	if (ret != 0) {
		pr_err("test_hi64xx_irq: invalid input\n");
		return -EINVAL;
	}

	if (irq == 255) {
		ret = hi64xx_irq_init_irq(hi64xx_irq_data, &test_irq_map);
		if (ret)
			dev_err(test_device, "hi64xx_irq_init_irq failed: %d\n", ret);
		else
			dev_info(test_device, "hi64xx_irq_init_irq OK\n");
	} else if (irq == 254) {
		int irqs[] = {0, 7, 8, 15, 16, 23, 24, 31};

		for (i = 0; i < ARRAY_SIZE(irqs); i++) {
			snprintf(irqname, sizeof(irqname) - 1, "testirq%d", irqs[i]);
			ret = hi64xx_irq_request_irq(hi64xx_irq_data, irqs[i],
						     test_irq_handler, irqname, NULL);
			if (ret)
				dev_info(test_device, "hi64xx_irq_request_irq failed: %d\n", ret);
		}
		hi64xx_irq_disable_irqs(hi64xx_irq_data, ARRAY_SIZE(irqs), irqs);
		hi64xx_irq_enable_irqs(hi64xx_irq_data, ARRAY_SIZE(irqs), irqs);
		hi64xx_irq_disable_irqs(hi64xx_irq_data, ARRAY_SIZE(irqs), irqs);
	} else {
		snprintf(irqname, sizeof(irqname) - 1, "testirq%d", irq);
		ret = hi64xx_irq_request_irq(hi64xx_irq_data, irq,
					     test_irq_handler, irqname, NULL);
		if (ret)
			dev_info(test_device, "hi64xx_irq_request_irq failed: %d\n", ret);
		ret = hi64xx_irq_disable_irq(hi64xx_irq_data, irq);
		if (ret)
			dev_info(test_device, "hi64xx_irq_disable_irq failed: %d\n", ret);
		ret = hi64xx_irq_enable_irq(hi64xx_irq_data, irq);
		if (ret)
			dev_info(test_device, "hi64xx_irq_enable_irq failed: %d\n", ret);
		ret = hi64xx_irq_disable_irq(hi64xx_irq_data, irq);
		if (ret)
			dev_info(test_device, "hi64xx_irq_disable_irq failed: %d\n", ret);
	}

	return count;
}
static void mdp4_overlay_dtv_alloc_pipe(struct msm_fb_data_type *mfd,
					int32 ptype, struct vsycn_ctrl *vctrl)
{
	int ret = 0;
	struct fb_info *fbi = mfd->fbi;
	struct mdp4_overlay_pipe *pipe;

	if (vctrl->base_pipe != NULL)
		return;

	pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER1);
	if (pipe == NULL) {
		pr_err("%s: pipe_alloc failed\n", __func__);
		return;
	}
	pipe->pipe_used++;
	pipe->mixer_stage = MDP4_MIXER_STAGE_BASE;
	pipe->mixer_num = MDP4_MIXER1;

	if (ptype == OVERLAY_TYPE_BF) {
		MDP_OUTP(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x5004,
			 ((0x0 & 0xFFF) << 16) | (0x0 & 0xFFF));
		MDP_OUTP(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x5008,
			 (0x0 & 0xFFF));
		pipe->src_format = MDP_ARGB_8888;
	} else {
		switch (mfd->ibuf.bpp) {
		case 2:
			pipe->src_format = MDP_RGB_565;
			break;
		case 3:
			pipe->src_format = MDP_RGB_888;
			break;
		case 4:
		default:
			if (hdmi_prim_display)
				pipe->src_format = MSMFB_DEFAULT_TYPE;
			else
				pipe->src_format = MDP_ARGB_8888;
			break;
		}
	}

	pipe->src_height = fbi->var.yres;
	pipe->src_width = fbi->var.xres;
	pipe->src_h = fbi->var.yres;
	pipe->src_w = fbi->var.xres;
	pipe->dst_h = fbi->var.yres;
	pipe->dst_w = fbi->var.xres;
	pipe->src_y = 0;
	pipe->src_x = 0;
	pipe->srcp0_ystride = fbi->fix.line_length;

	mdp4_overlay_mdp_pipe_req(pipe, mfd);

	ret = mdp4_overlay_format2pipe(pipe);
	if (ret < 0)
		pr_warn("%s: format2type failed\n", __func__);

	mdp4_overlay_dmae_xy(pipe);
	mdp4_overlayproc_cfg(pipe);

	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
		pipe->srcp0_addr = (uint32) mfd->ibuf.buf;
		mdp4_overlay_rgb_setup(pipe);
	}

	mdp4_overlay_reg_flush(pipe, 1);
	mdp4_mixer_stage_up(pipe);
	mdp4_mixer_stage_commit(pipe->mixer_num);

	vctrl->base_pipe = pipe;
}
static void pm_callback_power_off(struct kbase_device *kbdev)
{
	unsigned int uiCurrentFreqCount;
	volatile int polling_count = 100000;
	volatile int i = 0;
	unsigned int code;

	/// 1. Delay 0.01ms before power off
	for (i = 0; i < DELAY_LOOP_COUNT; i++)
		;
	if (DELAY_LOOP_COUNT != i)
		pr_warn("[MALI] power off delay error!\n");

	/// 2. Polling the MFG_DEBUG_REG for checking GPU IDLE before MTCMOS power off (0.1ms)
	MFG_WRITE32(0x3, MFG_DEBUG_CTRL_REG);
	do {
		/// 0x13000184[2]
		/// 1'b1: bus idle
		/// 1'b0: bus busy
		if (MFG_READ32(MFG_DEBUG_STAT_REG) & MFG_BUS_IDLE_BIT) {
			/// printk("[MALI]MFG BUS already IDLE! Ready to power off, %d\n", polling_count);
			break;
		}
	} while (polling_count--);
	if (polling_count <= 0)
		pr_warn("[MALI]!!!!MFG(GPU) subsys is still BUSY!!!!!, polling_count=%d\n", polling_count);

#if HARD_RESET_AT_POWER_OFF
	/* Cause a GPU hard reset to test whether we have actually idled the GPU
	 * and that we properly reconfigure the GPU on power up.
	 * Usually this would be dangerous, but if the GPU is working correctly it should
	 * be completely safe as the GPU should not be active at this point.
	 * However this is disabled normally because it will most likely interfere with
	 * bus logging etc. */
	//KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
	kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);

	/// Polling the MFG_DEBUG_REG for checking GPU IDLE before MTCMOS power off (0.1ms)
	polling_count = 100000;	/* restart the polling budget for the post-reset idle check */
	MFG_WRITE32(0x3, MFG_DEBUG_CTRL_REG);
	do {
		/// 0x13000184[2]
		/// 1'b1: bus idle
		/// 1'b0: bus busy
		if (MFG_READ32(MFG_DEBUG_STAT_REG) & MFG_BUS_IDLE_BIT) {
			/// printk("[MALI]MFG BUS already IDLE! Ready to power off, %d\n", polling_count);
			break;
		}
	} while (polling_count--);
	if (polling_count <= 0)
		printk("[MALI]!!!!MFG(GPU) subsys is still BUSY!!!!!, polling_count=%d\n", polling_count);

	g_power_status = 0;	// the power status is "power off".
	g_power_off_gpu_freq_idx = mt_gpufreq_get_cur_freq_index();	// record current freq. index.
	//printk("MALI: GPU power off freq idx : %d\n",g_power_off_gpu_freq_idx );
#if 1
	uiCurrentFreqCount = mt_gpufreq_get_dvfs_table_num();	// get freq. table size
	mt_gpufreq_target(uiCurrentFreqCount - 1);	// set gpu to lowest freq.
#endif

	code = mt_get_chip_hw_code();
	/* MTK clock modified */
	if (0x321 == code) {
		// do something for Denali-1(6735)
#ifdef CONFIG_MTK_CLKMGR
		disable_clock(MT_CG_MFG_BG3D, "GPU");
		disable_clock(MT_CG_DISP0_SMI_COMMON, "GPU");
#else
		clk_disable_unprepare(kbdev->clk_mfg);
		clk_disable_unprepare(kbdev->clk_mfg_scp);
		clk_disable_unprepare(kbdev->clk_smi_common);
		clk_disable_unprepare(kbdev->clk_display_scp);
#endif
	} else if (0x335 == code) {
		// do something for Denali-2(6735M)
#ifdef CONFIG_MTK_CLKMGR
		disable_clock(MT_CG_MFG_BG3D, "GPU");
		disable_clock(MT_CG_DISP0_SMI_COMMON, "GPU");
#endif /* CONFIG_MTK_CLKMGR */
	} else if (0x337 == code) {
		// do something for Denali-3(6753)
#ifdef CONFIG_MTK_CLKMGR
		disable_clock(MT_CG_MFG_BG3D, "GPU");
		disable_clock(MT_CG_DISP0_SMI_COMMON, "GPU");
#endif /* CONFIG_MTK_CLKMGR */
	} else {
		// unknown chip ID, error !!
#ifdef CONFIG_MTK_CLKMGR
		disable_clock(MT_CG_MFG_BG3D, "GPU");
		disable_clock(MT_CG_DISP0_SMI_COMMON, "GPU");
#endif /* CONFIG_MTK_CLKMGR */
	}

	if (mt6325_upmu_get_swcid() >= PMIC6325_E3_CID_CODE)
		mt_gpufreq_voltage_enable_set(0);

#ifdef ENABLE_COMMON_DVFS
	ged_dvfs_gpu_clock_switch_notify(0);
#endif
#endif
}
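/*
 * Aside: the hand-rolled busy-wait loops above can also be written with the
 * generic iopoll helpers from <linux/iopoll.h>. A sketch of the equivalent,
 * assuming the MFG debug status register is reachable as a plain __iomem
 * pointer (the MFG_READ32()/MFG_WRITE32() macros hide the mapped base):
 */
static int wait_mfg_bus_idle(void __iomem *mfg_debug_stat)
{
	u32 val;

	/* poll every 1 us, time out after 100 ms; switch to
	 * readl_poll_timeout_atomic() if this callback can run
	 * in atomic context */
	return readl_poll_timeout(mfg_debug_stat, val,
				  val & MFG_BUS_IDLE_BIT, 1, 100000);
}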
int check_fwbl(struct ssp_data *data)
{
	unsigned int fw_revision;

	pr_info("[SSP] change_rev = %d\n", data->ssp_changes);

#if defined(CONFIG_MACH_JSGLTE_CHN_CMCC)
	fw_revision = SSP_FIRMWARE_REVISION_JSGLTE;
#elif defined(CONFIG_MACH_VIKALCU)
	fw_revision = SSP_FIRMWARE_REVISION_STM;
#elif defined(CONFIG_MACH_HLTEVZW) || defined(CONFIG_MACH_HLTESPR) \
	|| defined(CONFIG_MACH_HLTEUSC)
	if (data->ap_rev > 3)
		fw_revision = SSP_FIRMWARE_REVISION_STM;
	else if (data->ap_rev > 2)
		fw_revision = SSP_FIRMWARE_REVISION_STM_88921;
	else
		fw_revision = SSP_FIRMWARE_REVISION_STM_RVS;
#else
	if (data->ap_rev > 3)
		fw_revision = SSP_FIRMWARE_REVISION_STM;
	else
		fw_revision = SSP_FIRMWARE_REVISION_STM_88921;
#endif

	data->uCurFirmRev = get_firmware_rev(data);

	if ((data->uCurFirmRev == SSP_INVALID_REVISION) ||
	    (data->uCurFirmRev == SSP_INVALID_REVISION2)) {
#if STM_SHOULD_BE_IMPLEMENT
		int iRet;

		data->client->addr = BOOTLOADER_SLAVE_ADDR;
		iRet = check_bootloader(data->client, BL_WAITING_BOOTLOAD_CMD);
		if (iRet >= 0) {
			pr_info("[SSP] ssp_load_fw_bootmode\n");
			return FW_DL_STATE_NEED_TO_SCHEDULE;
		}

		pr_warn("[SSP] Firm Rev is invalid(%8u). Retry.\n",
			data->uCurFirmRev);
		data->client->addr = APP_SLAVE_ADDR;
		data->uCurFirmRev = get_firmware_rev(data);
		if (data->uCurFirmRev == SSP_INVALID_REVISION ||
		    data->uCurFirmRev == ERROR) {
			pr_err("[SSP] MCU is not working, FW download failed\n");
			return FW_DL_STATE_FAIL;
		} else if (data->uCurFirmRev != fw_revision) {
			pr_info("[SSP] MCU Firm Rev : Old = %8u, New = %8u\n",
				data->uCurFirmRev, fw_revision);
			return FW_DL_STATE_NEED_TO_SCHEDULE;
		}
		pr_info("[SSP] MCU Firm Rev : Old = %8u, New = %8u\n",
			data->uCurFirmRev, fw_revision);
#endif
		pr_err("[SSP] SSP_INVALID_REVISION\n");
		return FW_DL_STATE_NEED_TO_SCHEDULE;
	}

	if (data->uCurFirmRev != fw_revision) {
		pr_info("[SSP] MCU Firm Rev : Old = %8u, New = %8u\n",
			data->uCurFirmRev, fw_revision);
		return FW_DL_STATE_NEED_TO_SCHEDULE;
	}
	pr_info("[SSP] MCU Firm Rev : Old = %8u, New = %8u\n",
		data->uCurFirmRev, fw_revision);

	return FW_DL_STATE_NONE;
}
static int push_pxx_to_hypervisor(struct acpi_processor *_pr) { int ret = 0; struct xen_platform_op op = { .cmd = XENPF_set_processor_pminfo, .interface_version = XENPF_INTERFACE_VERSION, .u.set_pminfo.id = _pr->acpi_id, .u.set_pminfo.type = XEN_PM_PX, }; struct xen_processor_performance *dst_perf; struct xen_processor_px *dst_states = NULL; dst_perf = &op.u.set_pminfo.perf; dst_perf->platform_limit = _pr->performance_platform_limit; dst_perf->flags |= XEN_PX_PPC; xen_copy_pct_data(&(_pr->performance->control_register), &dst_perf->control_register); xen_copy_pct_data(&(_pr->performance->status_register), &dst_perf->status_register); dst_perf->flags |= XEN_PX_PCT; dst_states = xen_copy_pss_data(_pr, dst_perf); if (!IS_ERR_OR_NULL(dst_states)) { set_xen_guest_handle(dst_perf->states, dst_states); dst_perf->flags |= XEN_PX_PSS; } if (!xen_copy_psd_data(_pr, dst_perf)) dst_perf->flags |= XEN_PX_PSD; if (dst_perf->flags != (XEN_PX_PSD | XEN_PX_PSS | XEN_PX_PCT | XEN_PX_PPC)) { pr_warn(DRV_NAME "ACPI CPU%u missing some P-state data (%x), skipping.\n", _pr->acpi_id, dst_perf->flags); ret = -ENODEV; goto err_free; } if (!no_hypercall) ret = HYPERVISOR_dom0_op(&op); if (!ret) { struct acpi_processor_performance *perf; unsigned int i; perf = _pr->performance; pr_debug("ACPI CPU%u - P-states uploaded.\n", _pr->acpi_id); for (i = 0; i < perf->state_count; i++) { pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", (i == perf->state ? '*' : ' '), i, (u32) perf->states[i].core_frequency, (u32) perf->states[i].power, (u32) perf->states[i].transition_latency); } } else if (ret != -EINVAL) /* EINVAL means the ACPI ID is incorrect - meaning the ACPI * table is referencing a non-existing CPU - which can happen * with broken ACPI tables. */ pr_warn(DRV_NAME "(_PXX): Hypervisor error (%d) for ACPI CPU%u\n", ret, _pr->acpi_id); err_free: if (!IS_ERR_OR_NULL(dst_states)) kfree(dst_states); return ret; } static int upload_pm_data(struct acpi_processor *_pr) { int err = 0; mutex_lock(&acpi_ids_mutex); if (__test_and_set_bit(_pr->acpi_id, acpi_ids_done)) { mutex_unlock(&acpi_ids_mutex); return -EBUSY; } if (_pr->flags.power) err = push_cxx_to_hypervisor(_pr); if (_pr->performance && _pr->performance->states) err |= push_pxx_to_hypervisor(_pr); mutex_unlock(&acpi_ids_mutex); return err; }
static int read_firmware(struct mtd_info *mtd, int first_page, int num_pages,
			 void **firmware)
{
	void *buf, *pos;
	int pages_per_block = mtd->erasesize / mtd->writesize;
	int now, size, block, ret, need_cleaning = 0;

	pr_debug("%s: reading %d pages from page %d\n", __func__,
		 num_pages, first_page);

	buf = pos = malloc(num_pages * mtd->writesize);
	if (!buf)
		return -ENOMEM;

	if (first_page % pages_per_block) {
		pr_err("Firmware does not begin on eraseblock boundary\n");
		ret = -EINVAL;
		goto err;
	}

	block = first_page / pages_per_block;
	size = num_pages * mtd->writesize;

	while (size) {
		if (block >= mtd_num_pebs(mtd)) {
			ret = -EIO;
			goto err;
		}

		if (mtd_peb_is_bad(mtd, block)) {
			block++;
			continue;
		}

		now = min_t(unsigned int, size, mtd->erasesize);

		ret = mtd_peb_read(mtd, pos, block, 0, now);
		if (ret == -EUCLEAN) {
			pr_info("Block %d needs cleaning\n", block);
			need_cleaning = 1;
		} else if (ret < 0) {
			pr_err("Reading PEB %d failed with %d\n", block, ret);
			goto err;
		}

		if (mtd_buf_all_ff(pos, now)) {
			/*
			 * At this point we do not know if this is a block that
			 * contains only 0xff or if it is really empty. We test
			 * this by reading a raw page and checking if it's empty.
			 */
			ret = block_is_empty(mtd, block);
			if (ret < 0)
				goto err;
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
		}

		pos += now;
		size -= now;
		block++;
	}

	ret = 0;
	*firmware = buf;

	pr_info("Firmware @ page %d, size %d pages has crc32: 0x%08x\n",
		first_page, num_pages,
		crc32(0, buf, num_pages * mtd->writesize));

err:
	if (ret < 0) {
		free(buf);
		pr_warn("Firmware at page %d is not readable\n", first_page);
		return ret;
	}

	if (need_cleaning) {
		pr_warn("Firmware at page %d needs cleanup\n", first_page);
		return -EUCLEAN;
	}

	return 0;
}
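/*
 * The CRC logged above makes a cheap redundancy check between two firmware
 * copies possible. A hypothetical helper built on read_firmware(); note that
 * -EUCLEAN still hands back a valid buffer, so it counts as success here.
 * Returns 1 on match, 0 on mismatch, negative on error.
 */
static int firmware_copies_match(struct mtd_info *mtd, int page_a, int page_b,
				 int num_pages)
{
	void *a, *b;
	int ret;

	ret = read_firmware(mtd, page_a, num_pages, &a);
	if (ret < 0 && ret != -EUCLEAN)
		return ret;
	ret = read_firmware(mtd, page_b, num_pages, &b);
	if (ret < 0 && ret != -EUCLEAN) {
		free(a);
		return ret;
	}
	ret = !memcmp(a, b, num_pages * mtd->writesize);
	free(a);
	free(b);
	return ret;
}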
void tegra_init_speedo_data(void)
{
	int i;

	if (!tegra_platform_is_silicon()) {
		cpu_process_id = 0;
		core_process_id = 0;
		gpu_process_id = 0;
		cpu_speedo_id = 0;
		soc_speedo_id = 0;
		gpu_speedo_id = 0;
		package_id = -1;
		cpu_speedo_value = 1777;
		gpu_speedo_value = 2000;
		cpu_speedo_0_value = 0;
		cpu_speedo_1_value = 0;
		soc_speedo_0_value = 0;
		soc_speedo_1_value = 0;
		soc_speedo_2_value = 0;
		soc_iddq_value = 0;
		gpu_iddq_value = 0;
		return;
	}

	cpu_speedo_0_value = tegra_fuse_readl(FUSE_CPU_SPEEDO_0);
	cpu_speedo_1_value = tegra_fuse_readl(FUSE_CPU_SPEEDO_1);

	/* GPU Speedo is stored in CPU_SPEEDO_2 */
	gpu_speedo_value = tegra_fuse_readl(FUSE_CPU_SPEEDO_2);

	soc_speedo_0_value = tegra_fuse_readl(FUSE_SOC_SPEEDO_0);
	soc_speedo_1_value = tegra_fuse_readl(FUSE_SOC_SPEEDO_1);
	soc_speedo_2_value = tegra_fuse_readl(FUSE_SOC_SPEEDO_2);

	cpu_iddq_value = tegra_fuse_readl(FUSE_CPU_IDDQ);
	soc_iddq_value = tegra_fuse_readl(FUSE_SOC_IDDQ);
	gpu_iddq_value = tegra_fuse_readl(FUSE_GPU_IDDQ);

	/* cpu_speedo_value = TEGRA124_CPU_SPEEDO; */
	cpu_speedo_value = cpu_speedo_0_value;

	if (cpu_speedo_value == 0) {
		cpu_speedo_value = 1900;
		pr_warn("Tegra12: Warning: Speedo value not fused. PLEASE FIX!!!!!!!!!!!\n");
		pr_warn("Tegra12: Warning: PLEASE USE BOARD WITH FUSED SPEEDO VALUE !!!!\n");
	}

	rev_sku_to_speedo_ids(tegra_revision, tegra_get_sku_id());

	for (i = 0; i < GPU_PROCESS_CORNERS_NUM; i++) {
		if (gpu_speedo_value < gpu_process_speedos[threshold_index][i])
			break;
	}
	gpu_process_id = i;

	for (i = 0; i < CPU_PROCESS_CORNERS_NUM; i++) {
		if (cpu_speedo_value < cpu_process_speedos[threshold_index][i])
			break;
	}
	cpu_process_id = i;

	for (i = 0; i < CORE_PROCESS_CORNERS_NUM; i++) {
		if (soc_speedo_0_value < core_process_speedos[threshold_index][i])
			break;
	}
	core_process_id = i;

	pr_info("Tegra12: CPU Speedo value %d, Soc Speedo value %d, Gpu Speedo value %d\n",
		cpu_speedo_value, soc_speedo_0_value, gpu_speedo_value);
	pr_info("Tegra12: CPU Speedo ID %d, Soc Speedo ID %d, Gpu Speedo ID %d\n",
		cpu_speedo_id, soc_speedo_id, gpu_speedo_id);
	pr_info("Tegra12: CPU Process ID %d, Soc Process ID %d, Gpu Process ID %d\n",
		cpu_process_id, core_process_id, gpu_process_id);
}
static int jffs2_parse_options(struct jffs2_sb_info *c, char *data) { substring_t args[MAX_OPT_ARGS]; char *p, *name; unsigned int opt; if (!data) return 0; while ((p = strsep(&data, ","))) { int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_override_compr: name = match_strdup(&args[0]); if (!name) return -ENOMEM; if (!strcmp(name, "none")) c->mount_opts.compr = JFFS2_COMPR_MODE_NONE; #ifdef CONFIG_JFFS2_LZO else if (!strcmp(name, "lzo")) c->mount_opts.compr = JFFS2_COMPR_MODE_FORCELZO; #endif #ifdef CONFIG_JFFS2_ZLIB else if (!strcmp(name, "zlib")) c->mount_opts.compr = JFFS2_COMPR_MODE_FORCEZLIB; #endif else { pr_err("Error: unknown compressor \"%s\"\n", name); kfree(name); return -EINVAL; } kfree(name); c->mount_opts.override_compr = true; break; case Opt_rp_size: if (match_int(&args[0], &opt)) return -EINVAL; opt *= 1024; if (opt > c->mtd->size) { pr_warn("Too large reserve pool specified, max " "is %llu KB\n", c->mtd->size / 1024); return -EINVAL; } c->mount_opts.rp_size = opt; break; default: pr_err("Error: unrecognized mount option '%s' or missing value\n", p); return -EINVAL; } } return 0; }
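/*
 * For reference, match_token() above relies on a match_table_t mapping the
 * option strings to the tokens handled in the switch. A representative table
 * of the shape such a parser assumes (the patterns and the Opt_err sentinel
 * are illustrative, not copied from the original file):
 */
static const match_table_t tokens = {
	{Opt_override_compr, "compr=%s"},
	{Opt_rp_size, "rp_size=%u"},
	{Opt_err, NULL},
};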
/* called by the modify qp verb, this routine checks all the parameters before * making any changes */ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) { enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ? attr->cur_qp_state : qp->attr.qp_state; enum ib_qp_state new_state = (mask & IB_QP_STATE) ? attr->qp_state : cur_state; if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask, IB_LINK_LAYER_ETHERNET)) { pr_warn("invalid mask or state for qp\n"); goto err1; } if (mask & IB_QP_STATE) { if (cur_state == IB_QPS_SQD) { if (qp->req.state == QP_STATE_DRAIN && new_state != IB_QPS_ERR) goto err1; } } if (mask & IB_QP_PORT) { if (attr->port_num != 1) { pr_warn("invalid port %d\n", attr->port_num); goto err1; } } if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq)) goto err1; if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr)) goto err1; if (mask & IB_QP_ALT_PATH) { if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr)) goto err1; if (attr->alt_port_num != 1) { pr_warn("invalid alt port %d\n", attr->alt_port_num); goto err1; } if (attr->alt_timeout > 31) { pr_warn("invalid QP alt timeout %d > 31\n", attr->alt_timeout); goto err1; } } if (mask & IB_QP_PATH_MTU) { struct rxe_port *port = &rxe->port; enum ib_mtu max_mtu = port->attr.max_mtu; enum ib_mtu mtu = attr->path_mtu; if (mtu > max_mtu) { pr_debug("invalid mtu (%d) > (%d)\n", ib_mtu_enum_to_int(mtu), ib_mtu_enum_to_int(max_mtu)); goto err1; } } if (mask & IB_QP_MAX_QP_RD_ATOMIC) { if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) { pr_warn("invalid max_rd_atomic %d > %d\n", attr->max_rd_atomic, rxe->attr.max_qp_rd_atom); goto err1; } } if (mask & IB_QP_TIMEOUT) { if (attr->timeout > 31) { pr_warn("invalid QP timeout %d > 31\n", attr->timeout); goto err1; } } return 0; err1: return -EINVAL; }
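/*
 * Caller's view, for context: these checks run when a consumer issues the
 * modify-QP verb. A minimal, illustrative transition of a fresh QP to INIT
 * through the core verbs layer (the values are typical, not from this driver):
 */
static int example_qp_to_init(struct ib_qp *qp, u8 port_num)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = port_num,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}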