/*
 * Shared interrupt handler for both GPIO wells.
 *
 * The SCH exposes two banks of GPIOs: the "core" well (powered only while
 * the system is running) and the "resume" well (powered in suspend).  Both
 * banks raise the same interrupt line, so each status register is scanned
 * in turn.  For every pin whose trigger-status (TS) bit is set, the bit is
 * acknowledged in hardware *before* the demultiplexed handler runs, then
 * the per-pin virtual IRQ is dispatched via generic_handle_irq().
 *
 * Returns IRQ_HANDLED if at least one pin had a pending event, IRQ_NONE
 * otherwise (letting the kernel detect spurious/shared-line interrupts).
 */
static irqreturn_t sch_gpio_irq_handler(int irq, void *dev_id)
{
	int res;
	int i, ret = IRQ_NONE;

	/* Core-powered well: check each pin's trigger-status bit. */
	for (i = 0; i < sch_gpio_core.ngpio; i++) {
		res = sch_gpio_reg_get(CGTS, i);
		if (res) {
			/* clear by setting TS to 1 (write-1-to-clear) */
			sch_gpio_reg_set(CGTS, i, 1);
			generic_handle_irq(chip_ptr->irq_base_core + i);
			ret = IRQ_HANDLED;
		}
	}

	/* Resume-powered well: same ack-then-dispatch sequence. */
	for (i = 0; i < sch_gpio_resume.ngpio; i++) {
		res = sch_gpio_reg_get(RGTS, i);
		if (res) {
			/* clear by setting TS to 1 (write-1-to-clear) */
			sch_gpio_reg_set(RGTS, i, 1);
			generic_handle_irq(chip_ptr->irq_base_resume + i);
			ret = IRQ_HANDLED;
		}
	}

	return ret;
}
/*
 * irq_chip .irq_ack callback for resume-well GPIOs: acknowledge a pending
 * event by writing 1 to the pin's trigger-status (TS) bit, which is
 * write-1-to-clear.
 */
static void sch_gpio_resume_irq_ack(struct irq_data *d)
{
	u32 pin = d->irq - chip_ptr->irq_base_resume;

	sch_gpio_reg_set(RGTS, pin, 1);
}
/*
 * gpio_chip .set callback: drive the pin's level (GLV) register to @val,
 * serialized against other register accesses by the per-chip lock.
 */
static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
{
	struct sch_gpio *chip = to_sch_gpio(gc);

	spin_lock(&chip->lock);
	sch_gpio_reg_set(gc, gpio_num, GLV, val);
	spin_unlock(&chip->lock);
}
/*
 * Set the output level (CGLV) of a core-well GPIO.  Interrupts are
 * disabled around the register access so the update cannot race with
 * the IRQ path touching the same register block.
 */
static void sch_gpio_core_set(struct gpio_chip *gc, unsigned gpio_num, int val)
{
	unsigned long flags;

	spin_lock_irqsave(&gpio_lock, flags);
	sch_gpio_reg_set(CGLV, gpio_num, val);
	spin_unlock_irqrestore(&gpio_lock, flags);
}
/*
 * gpio_chip .direction_input callback: set the pin's GIO bit to 1,
 * which configures it as an input.  Always succeeds.
 */
static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
{
	struct sch_gpio *chip = to_sch_gpio(gc);

	spin_lock(&chip->lock);
	sch_gpio_reg_set(gc, gpio_num, GIO, 1);
	spin_unlock(&chip->lock);

	return 0;
}
static void sch_gpio_resume_irq_disable_all(struct sch_gpio *chip, unsigned int num) { unsigned long flags = 0; u32 gpio_num = 0; spin_lock_irqsave(&gpio_lock, flags); for (gpio_num = 0; gpio_num < num; gpio_num++) { sch_gpio_reg_clear_if_set(RGTPE, gpio_num); sch_gpio_reg_clear_if_set(RGTNE, gpio_num); sch_gpio_reg_clear_if_set(RGGPE, gpio_num); sch_gpio_reg_clear_if_set(RGSMI, gpio_num); sch_gpio_reg_clear_if_set(RGNMIEN, gpio_num); /* clear any pending interrupt */ sch_gpio_reg_set(RGTS, gpio_num, 1); } spin_unlock_irqrestore(&gpio_lock, flags); }
/*
 * gpio_chip .direction_output callback: configure the pin as an output
 * (GIO = 0) and then drive it to @val.  Always returns 0.
 */
static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num, int val)
{
	struct sch_gpio *sch = to_sch_gpio(gc);

	spin_lock(&sch->lock);
	sch_gpio_reg_set(gc, gpio_num, GIO, 0);
	spin_unlock(&sch->lock);

	/*
	 * According to the datasheet, writing to the level register has no
	 * effect when the GPIO is programmed as input; in fact the level
	 * register is read-only while the pin is configured as input.
	 * Thus presetting the output level before switching to output is
	 * _NOT_ possible, and we have to set the level after configuring
	 * the GPIO as output.  As a consequence we cannot prevent a short
	 * low pulse if the direction is set to high and an external
	 * pull-up is connected.
	 */
	sch_gpio_set(gc, gpio_num, val);
	return 0;
}
/*
 * Platform-driver probe: claim the device's I/O port region, initialize
 * the struct sch_gpio instance, apply per-SoC pin layout and enable
 * quirks keyed off pdev->id (the LPC bridge's PCI device ID), and
 * register the gpio_chip.
 *
 * Returns 0 on success; -ENOMEM on allocation failure, -EBUSY when the
 * I/O resource is missing or cannot be reserved, -ENODEV for an
 * unrecognized device ID, or the error from gpiochip_add().
 */
static int sch_gpio_probe(struct platform_device *pdev)
{
	struct sch_gpio *sch;
	struct resource *res;

	sch = devm_kzalloc(&pdev->dev, sizeof(*sch), GFP_KERNEL);
	if (!sch)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!res)
		return -EBUSY;

	if (!devm_request_region(&pdev->dev, res->start, resource_size(res),
				 pdev->name))
		return -EBUSY;

	spin_lock_init(&sch->lock);
	sch->iobase = res->start;
	sch->chip = sch_gpio_chip;
	sch->chip.label = dev_name(&pdev->dev);
	sch->chip.parent = &pdev->dev;

	/* Pin layout and enable quirks differ per LPC bridge variant. */
	switch (pdev->id) {
	case PCI_DEVICE_ID_INTEL_SCH_LPC:
		sch->core_base = 0;
		sch->resume_base = 10;
		sch->chip.ngpio = 14;

		/*
		 * GPIO[6:0] enabled by default
		 * GPIO7 is configured by the CMC as SLPIOVR
		 * Enable GPIO[9:8] core powered gpios explicitly
		 */
		sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
		sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
		/*
		 * SUS_GPIO[2:0] enabled by default
		 * Enable SUS_GPIO3 resume powered gpio explicitly
		 */
		sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
		break;

	case PCI_DEVICE_ID_INTEL_ITC_LPC:
		sch->core_base = 0;
		sch->resume_base = 5;
		sch->chip.ngpio = 14;
		break;

	case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
		sch->core_base = 0;
		sch->resume_base = 21;
		sch->chip.ngpio = 30;
		break;

	case PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB:
		sch->core_base = 0;
		sch->resume_base = 2;
		sch->chip.ngpio = 8;
		break;

	default:
		return -ENODEV;
	}

	platform_set_drvdata(pdev, sch);

	return gpiochip_add(&sch->chip);
}