/*
 * Resume handler: restore the MC register context captured at suspend and
 * latch it into the hardware.
 */
static int tegra30_mc_resume(struct device *dev)
{
	struct tegra30_mc *mc = dev_get_drvdata(dev);
	unsigned int idx;

	/* Replay every saved context register back into the MC. */
	for (idx = 0; idx < ARRAY_SIZE(tegra30_mc_ctx); idx++)
		mc_writel(mc, mc->ctx[idx], tegra30_mc_ctx[idx]);

	/* Writing 1 to MC_TIMING_CONTROL latches the restored values. */
	mc_writel(mc, 1, MC_TIMING_CONTROL);
	/* Read-back to ensure that write reached */
	mc_readl(mc, MC_TIMING_CONTROL);

	return 0;
}
/*
 * Cap the number of outstanding requests the EMEM arbiter will accept.
 * If holdoff-override is not yet enabled, or the current MAX field exceeds
 * the desired cap, rewrite the register and latch it via MC_TIMING_CONTROL.
 */
static inline void set_mc_arbiter_limits(void)
{
	u32 reg = mc_readl(MC_EMEM_ARB_OUTSTANDING_REQ);
	/*
	 * NOTE(review): max_val is built from an EMC MRS-wait-count shift
	 * (EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT) but is compared against an
	 * MC arbiter MAX field — looks like a borrowed constant; confirm
	 * against the TRM that the shift matches
	 * MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK's bit position.
	 */
	u32 max_val = 0x50 << EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;

	if (!(reg & MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE) ||
	    ((reg & MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK) > max_val)) {
		reg = MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE |
		      MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE |
		      max_val;
		mc_writel(reg, MC_EMEM_ARB_OUTSTANDING_REQ);
		/* Latch the new arbiter settings into the hardware. */
		mc_writel(0x1, MC_TIMING_CONTROL);
	}
}
/*
 * The actual error handling takes longer than is ideal so this must be
 * threaded.
 */
static irqreturn_t tegra_mc_error_hard_irq(int irq, void *data)
{
	u32 intr;

	/* NOTE(review): always reports channel 0 here — confirm intended. */
	err_channel = 0;
	intr = mc_readl(MC_INT_STATUS);

	/*
	 * Sometimes the MC seems to generate spurious interrupts - that
	 * is interrupts with an interrupt status register equal to 0.
	 * Not much we can do other than keep a count of them.
	 */
	if (!intr) {
		spurious_intrs++;
		return IRQ_NONE;
	}

	trace_printk("MCERR detected.\n");

	/*
	 * We have an interrupt; disable the rest until this one is handled.
	 * This means we will potentially miss interrupts. We can live with
	 * that.
	 */
	mc_writel(0, MC_INT_MASK);
	/* Read-back — presumably to flush the mask write; confirm. */
	mc_readl(MC_INT_STATUS);

	/* Hand the masked status to the threaded handler via mc_intr. */
	mc_intr = intr & mc_int_mask;

	return IRQ_WAKE_THREAD;
}
/*
 * Clear the early-ACK bits in the EMEM arbiter override register, then
 * rebuild the full intended value in override_val.
 */
static inline void disable_early_ack(u32 mc_override)
{
	/*
	 * NOTE(review): static so the value lives at a fixed address that
	 * survives this call; the explicit L1 flush + L2 clean below suggest
	 * it is read later with caches unavailable (e.g. during a cluster
	 * switch / LP-CPU path) — TODO confirm the consumer.
	 */
	static u32 override_val;

	/* Write the override register with EACK disabled. */
	override_val = mc_override & (~MC_EMEM_ARB_OVERRIDE_EACK_MASK);
	mc_writel(override_val, MC_EMEM_ARB_OVERRIDE);
	/* Push override_val out to physical memory (L1 then outer cache). */
	__cpuc_flush_dcache_area(&override_val, sizeof(override_val));
	outer_clean_range(__pa(&override_val), __pa(&override_val + 1));
	/* Re-add the EACK bits so override_val holds the restore value. */
	override_val |= mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK;
}
/*
 * MC interrupt handler: decode every pending, unmasked interrupt source
 * and then acknowledge the status register.
 */
static irqreturn_t tegra30_mc_isr(int irq, void *data)
{
	u32 stat, mask, bit;
	struct tegra30_mc *mc = data;

	stat = mc_readl(mc, MC_INTSTATUS);
	mask = mc_readl(mc, MC_INTMASK);
	mask &= stat;
	if (!mask)
		return IRQ_NONE;

	/*
	 * Decode each set bit.  The handled bit must be cleared from the
	 * local copy (mask &= mask - 1 drops the lowest set bit, i.e. the
	 * one ffs() just returned) — otherwise ffs() keeps returning the
	 * same bit and this loop never terminates.
	 */
	while ((bit = ffs(mask)) != 0) {
		tegra30_mc_decode(mc, bit - 1);
		mask &= mask - 1;
	}

	/* Ack everything we observed. */
	mc_writel(mc, stat, MC_INTSTATUS);
	return IRQ_HANDLED;
}
void tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate) { unsigned int i; struct tegra_mc_timing *timing = NULL; for (i = 0; i < mc->num_timings; i++) { if (mc->timings[i].rate == rate) { timing = &mc->timings[i]; break; } } if (!timing) { dev_err(mc->dev, "no memory timing registered for rate %lu\n", rate); return; } for (i = 0; i < mc->soc->num_emem_regs; ++i) mc_writel(mc, timing->emem_data[i], mc->soc->emem_regs[i]); }
/*
 * Probe: allocate driver state (with trailing space for the suspend
 * context), map the MC register apertures, hook the interrupt, and enable
 * the error interrupts we care about.
 */
static int __devinit tegra30_mc_probe(struct platform_device *pdev)
{
	struct resource *irq;
	struct tegra30_mc *mc;
	size_t bytes;
	int err, i;
	u32 intmask;

	/* State struct plus one saved u32 per context register. */
	bytes = sizeof(*mc) + sizeof(u32) * ARRAY_SIZE(tegra30_mc_ctx);
	mc = devm_kzalloc(&pdev->dev, bytes, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;
	mc->dev = &pdev->dev;

	/* Map every MC register aperture the device tree provides. */
	for (i = 0; i < ARRAY_SIZE(mc->regs); i++) {
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			return -ENODEV;
		mc->regs[i] = devm_request_and_ioremap(&pdev->dev, res);
		if (!mc->regs[i])
			return -EBUSY;
	}

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq)
		return -ENODEV;
	err = devm_request_irq(&pdev->dev, irq->start, tegra30_mc_isr,
			       IRQF_SHARED, dev_name(&pdev->dev), mc);
	if (err)
		return err;	/* propagate the real errno, not -ENODEV */

	platform_set_drvdata(pdev, mc);

	/* Unmask the error interrupts we handle. */
	intmask = MC_INT_INVALID_SMMU_PAGE |
		  MC_INT_DECERR_EMEM | MC_INT_SECURITY_VIOLATION;
	mc_writel(mc, intmask, MC_INTMASK);
	return 0;
}
/* Re-enable the early-ACK bits in the EMEM arbiter override register. */
static inline void enable_early_ack(u32 mc_override)
{
	u32 val = mc_override | MC_EMEM_ARB_OVERRIDE_EACK_MASK;

	mc_writel(val, MC_EMEM_ARB_OVERRIDE);
}
/* * This will always e successful. However, if something goes wrong in the * init a message will be printed to the kernel log. Since this is a * non-essential piece of the kernel no reason to fail the entire MC init * if this fails. */ int tegra_mcerr_init(struct dentry *mc_parent, struct platform_device *pdev) { int irq; const void *prop; irqreturn_t (*irq_top)(int irq, void *data) = tegra_mc_error_hard_irq; irqreturn_t (*irq_bot)(int irq, void *data) = tegra_mc_error_thread; chip_specific.mcerr_info = mcerr_default_info; chip_specific.mcerr_print = mcerr_default_print; chip_specific.mcerr_debugfs_show = mcerr_default_debugfs_show; chip_specific.nr_clients = 0; /* * mcerr_chip_specific_setup() can override any of the default * functions as it wishes. */ mcerr_chip_specific_setup(&chip_specific); if (chip_specific.nr_clients == 0 || chip_specific.intr_descriptions == NULL) { pr_err("Missing necessary chip_specific functionality!\n"); return -ENODEV; } if (tegra_mc_error_hard_irq_ovr) irq_top = tegra_mc_error_hard_irq_ovr; if (tegra_mc_error_thread_ovr) irq_bot = tegra_mc_error_thread_ovr; prop = of_get_property(pdev->dev.of_node, "int_mask", NULL); if (!prop) { pr_err("No int_mask prop for mcerr!\n"); return -EINVAL; } mc_int_mask = be32_to_cpup(prop); mc_writel(mc_int_mask, MC_INT_MASK); irq = irq_of_parse_and_map(pdev->dev.of_node, 0); if (irq < 0) { pr_err("Unable to parse/map MC error interrupt\n"); goto done; } if (request_threaded_irq(irq, irq_top, irq_bot, 0, "mc_status", NULL)) { pr_err("Unable to register MC error interrupt\n"); goto done; } if (!mc_parent) goto done; mcerr_debugfs_dir = debugfs_create_dir("err", mc_parent); if (mcerr_debugfs_dir == NULL) { pr_err("Failed to make debugfs node: %ld\n", PTR_ERR(mcerr_debugfs_dir)); goto done; } debugfs_create_file("mcerr", 0644, mcerr_debugfs_dir, NULL, &mcerr_debugfs_fops); debugfs_create_file("mcerr_throttle", S_IRUGO | S_IWUSR, mcerr_debugfs_dir, NULL, &mcerr_throttle_debugfs_fops); 
debugfs_create_u32("quiet", 0644, mcerr_debugfs_dir, &mcerr_silenced); done: return 0; }
/*
 * Common MC error handling code.
 *
 * Threaded half of the MC error interrupt: decodes the status snapshot
 * left in mc_intr by the hard IRQ, prints/records the fault, then acks
 * the interrupt and re-enables the MC interrupt mask.
 */
static irqreturn_t tegra_mc_error_thread(int irq, void *data)
{
	struct mc_client *client = NULL;
	const struct mc_error *fault;
	const char *smmu_info;
	unsigned long count;
	phys_addr_t addr;
	u32 status, intr = mc_intr;
	u32 write, secure;
	u32 client_id;

	cancel_delayed_work(&unthrottle_prints_work);

	/*
	 * EMEM arbitration errors are counted by arb_intr(); if that was
	 * the only pending bit there is nothing further to decode.
	 */
	if (intr & MC_INT_ARBITRATION_EMEM) {
		arb_intr();
		if (intr == MC_INT_ARBITRATION_EMEM)
			goto out;
		intr &= ~MC_INT_ARBITRATION_EMEM;
	}

	count = atomic_inc_return(&error_count);

	fault = chip_specific.mcerr_info(intr & mc_int_mask);
	if (WARN(!fault, "Unknown error! intr sig: 0x%08x\n",
		 intr & mc_int_mask))
		goto out;

	/* Some faults carry no status/address registers to decode. */
	if (fault->flags & E_NO_STATUS) {
		mcerr_pr("MC fault - no status: %s\n", fault->msg);
		goto out;
	}

	status = __mc_readl(err_channel, fault->stat_reg);
	addr = __mc_readl(err_channel, fault->addr_reg);
	secure = !!(status & MC_ERR_STATUS_SECURE);
	write = !!(status & MC_ERR_STATUS_WRITE);
	/* Low byte of status is the client ID; clamp to the table bound. */
	client_id = status & 0xff;
	client = &mc_clients[client_id <= mc_client_last ?
			     client_id : mc_client_last];

	/*
	 * LPAE: make sure we get the extra 2 physical address bits available
	 * and pass them down to the printing function.
	 */
	addr |= (((phys_addr_t)(status & MC_ERR_STATUS_ADR_HI)) << 12);

	if (fault->flags & E_SMMU)
		smmu_info = smmu_page_attrib[MC_ERR_SMMU_BITS(status)];
	else
		smmu_info = NULL;

	mcerr_info_update(client, intr & mc_int_mask);

	/*
	 * Throttle printing once MAX_PRINTS errors have accumulated; the
	 * delayed work resets the count half a second later.
	 */
	if (mcerr_throttle_enabled && count >= MAX_PRINTS) {
		schedule_delayed_work(&unthrottle_prints_work, HZ/2);
		if (count == MAX_PRINTS)
			mcerr_pr("Too many MC errors; throttling prints\n");
		goto out;
	}

	chip_specific.mcerr_print(fault, client, status, addr, secure, write,
				  smmu_info);
out:
	/* Ack the handled bits, then restore the interrupt mask that the
	 * hard IRQ cleared.  The intervening read presumably flushes the
	 * ack write before unmasking — confirm against the TRM. */
	mc_writel(intr, MC_INT_STATUS);
	mc_readl(MC_INT_MASK);
	mc_writel(mc_int_mask, MC_INT_MASK);
	return IRQ_HANDLED;
}