Esempio n. 1
0
/*
 * Stub for all POSIX timer syscalls when CONFIG_POSIX_TIMERS is disabled:
 * logs the offending task once, then fails with -ENOSYS.
 */
asmlinkage long sys_ni_posix_timers(void)
{
	/* _once: avoid log spam if the task keeps retrying the syscall. */
	pr_err_once("process %d (%s) attempted a POSIX timer syscall "
		    "while CONFIG_POSIX_TIMERS is not set\n",
		    current->pid, current->comm);
	return -ENOSYS;
}
Esempio n. 2
0
/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	/* Account this interrupt on the local CPU's statistics. */
	kstat_incr_irqs_this_cpu(desc);

	/* Ack at the chip level first, if the chip requires it. */
	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		/* Hand the handler this CPU's instance of the device id. */
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		/*
		 * No action installed: spurious interrupt.  Mask it on this
		 * CPU (if it was enabled) so it cannot storm, and log once.
		 */
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	/* Signal end-of-interrupt to the chip if needed. */
	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
Esempio n. 3
0
/*
 * c4iw_alloc_ucontext() - allocate a user context for a cxgb4 device.
 * @ibdev: IB device the context is created on.
 * @udata: response buffer descriptor used to reply to userspace.
 *
 * Returns the embedded ib_ucontext on success or ERR_PTR() on failure.
 * If userspace (libcxgb4) is too old to receive the status page mapping
 * info, the status page feature is disabled device-wide instead.
 */
static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct c4iw_ucontext *context;
	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
	struct c4iw_alloc_ucontext_resp uresp;
	int ret = 0;
	struct c4iw_mm_entry *mm = NULL;

	pr_debug("ibdev %p\n", ibdev);

	/*
	 * Zero the response up front so no uninitialized stack bytes
	 * (including struct padding) can leak to userspace through
	 * ib_copy_to_udata() below.
	 */
	memset(&uresp, 0, sizeof(uresp));

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context) {
		ret = -ENOMEM;
		goto err;
	}

	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	kref_init(&context->kref);

	if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
		pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
		rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
	} else {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			ret = -ENOMEM;
			goto err_free;
		}

		uresp.status_page_size = PAGE_SIZE;

		/* Hand out a unique mmap key for the status page. */
		spin_lock(&context->mmap_lock);
		uresp.status_page_key = context->key;
		context->key += PAGE_SIZE;
		spin_unlock(&context->mmap_lock);

		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err_mm;

		/* Register the key -> physical address mapping for mmap(). */
		mm->key = uresp.status_page_key;
		mm->addr = virt_to_phys(rhp->rdev.status_page);
		mm->len = PAGE_SIZE;
		insert_mmap(context, mm);
	}
	return &context->ibucontext;
err_mm:
	kfree(mm);
err_free:
	kfree(context);
err:
	return ERR_PTR(ret);
}
Esempio n. 4
0
/*
 * Stress worker: repeatedly acquires all locks in one fixed (random)
 * order under a ww_acquire context, exercising the wound/wait -EDEADLK
 * backoff protocol until the timeout expires.
 */
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		/* Index of the lock we backed off on, -1 if none yet. */
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		/* Take every lock except the one already held via lock_slow. */
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		/* Drop the contended lock if it lies beyond the ones taken. */
		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			/* Wounded: wait for the contended lock, then retry. */
			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
			goto retry;
		}

		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
	kfree(stress);
}
/*
 * Look up (and cache) the ds2784 fuel-gauge power supply.
 * Returns 0 on success, -ENODEV if the supply is not registered.
 */
static inline int manta_bat_get_ds2784(void)
{
	if (manta_bat_ds2784_battery)
		return 0;

	manta_bat_ds2784_battery =
		power_supply_get_by_name("ds2784-fuelgauge");
	if (manta_bat_ds2784_battery)
		return 0;

	pr_err_once("%s: failed to get ds2784-fuelgauge power supply\n",
		    __func__);
	return -ENODEV;
}
/*
 * Read the current temperature from the bandgap sensor.
 * Returns the converted temperature, or -EINVAL for an ADC code
 * outside the calibrated conversion table.
 */
static int omap_read_current_temp(struct omap_temp_sensor *temp_sensor)
{
	int raw;

	raw = omap_temp_sensor_readl(temp_sensor, TEMP_SENSOR_CTRL_OFFSET);
	raw &= OMAP4_BGAP_TEMP_SENSOR_DTEMP_MASK;

	if (!temp_sensor->is_efuse_valid)
		pr_err_once("Non-trimmed BGAP, Temp not accurate\n");

	if (raw < OMAP_ADC_START_VALUE || raw > OMAP_ADC_END_VALUE)
		return -EINVAL;

	return adc_to_temp[raw - OMAP_ADC_START_VALUE];
}
/*
 * TALERT interrupt handler: acknowledges the hot/cold bandgap event,
 * re-arms the opposite threshold mask, reads the current temperature,
 * and notifies the thermal framework plus userspace (via uevent).
 */
static irqreturn_t omap_talert_irq_handler(int irq, void *data)
{
	struct omap_temp_sensor *temp_sensor = (struct omap_temp_sensor *)data;
	int t_hot, t_cold, temp_offset, temp;
	char env_temp[20];
	char env_zone[20];
	char *envp[] = { env_temp, env_zone, NULL };

	t_hot = omap_temp_sensor_readl(temp_sensor, BGAP_STATUS_OFFSET)
	    & OMAP4_HOT_FLAG_MASK;
	t_cold = omap_temp_sensor_readl(temp_sensor, BGAP_STATUS_OFFSET)
	    & OMAP4_COLD_FLAG_MASK;
	temp_offset = omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);

	/* Mask the threshold that just fired, unmask the opposite one. */
	if (t_hot) {
		temp_offset &= ~(OMAP4_MASK_HOT_MASK);
		temp_offset |= OMAP4_MASK_COLD_MASK;
	} else if (t_cold) {
		temp_offset &= ~(OMAP4_MASK_COLD_MASK);
		temp_offset |= OMAP4_MASK_HOT_MASK;
	}

	omap_temp_sensor_writel(temp_sensor, temp_offset, BGAP_CTRL_OFFSET);
	temp = omap_temp_sensor_readl(temp_sensor, TEMP_SENSOR_CTRL_OFFSET);
	temp &= OMAP4_BGAP_TEMP_SENSOR_DTEMP_MASK;

	if (!temp_sensor->is_efuse_valid)
		pr_err_once("Non-trimmed BGAP, Temp not accurate\n");

	/* look up for temperature in the table and return the
	   temperature */
	if (temp < OMAP_ADC_START_VALUE || temp > OMAP_ADC_END_VALUE) {
		pr_err("invalid adc code reported by the sensor %d\n", temp);
	} else {
		temp_sensor->therm_fw->current_temp =
				adc_to_temp[temp - OMAP_ADC_START_VALUE];
		thermal_sensor_set_temp(temp_sensor->therm_fw);
		/*
		 * envp[] already points at these buffers (set in the
		 * initializer above), so only the formatting is needed;
		 * the redundant envp[0]/envp[1] re-assignments are gone.
		 */
		snprintf(env_temp, sizeof(env_temp), "TEMP=%d",
			 thermal_sensor_get_hotspot_temp(temp_sensor->therm_fw) / 1000);
		snprintf(env_zone, sizeof(env_zone), "ZONE=%d",
			 thermal_sensor_get_zone(temp_sensor->therm_fw));
		kobject_uevent_env(&temp_sensor->dev->kobj, KOBJ_CHANGE, envp);
	}

	return IRQ_HANDLED;
}
/*
 * Read the current temperature from the bandgap sensor.
 * Returns the converted temperature, or -EINVAL for an ADC code
 * outside the calibrated range.
 */
static int omap_read_current_temp(struct omap_temp_sensor *temp_sensor)
{
	int adc;

	adc = omap_temp_sensor_readl(temp_sensor, TEMP_SENSOR_CTRL_OFFSET);
	adc &= OMAP4_BGAP_TEMP_SENSOR_DTEMP_MASK;

	if (!temp_sensor->is_efuse_valid)
		/* Fixed: concatenated literal was missing a space ("BGAP,Temp"). */
		pr_err_once("%s: Invalid EFUSE, Non-trimmed BGAP, "
			    "Temp not accurate\n", __func__);

	if (adc < OMAP_ADC_START_VALUE || adc > OMAP_ADC_END_VALUE) {
		/* Fixed: message was missing its trailing newline. */
		pr_err("%s: Invalid adc code reported by the sensor %d\n",
			__func__, adc);
		return -EINVAL;
	}

	return adc_to_temp_conversion(adc);
}
Esempio n. 9
0
/*
 * read_source() - refresh the cached frequency value(s) for a power/clock
 * source (CPU, GPU or EMC) and bump its sample counter.
 *
 * Takes s->lock for the duration of the update.  The counter is not
 * incremented for an invalid source type.
 */
static void read_source(struct power_clk_source *s)
{
	int i;

	mutex_lock(&s->lock);

	switch (s->type) {
	case QUADD_POWER_CLK_CPU:
		/* update cpu frequency */
		for (i = 0; i < nr_cpu_ids; i++)
			s->data[i].value = cpufreq_get(i);
		break;

	case QUADD_POWER_CLK_GPU:
		/*
		 * update gpu frequency
		 * NOTE(review): clk_get_sys() conventionally returns
		 * ERR_PTR() on failure, so a bare non-NULL check may not
		 * catch errors — confirm against this tree's clk code.
		 */
		s->clkp = clk_get_sys("3d", NULL);
		if (s->clkp) {
			s->data[0].value =
				clk_get_rate(s->clkp) / 1000;
			clk_put(s->clkp);
		}
		break;

	case QUADD_POWER_CLK_EMC:
		/* update emc frequency */
		s->clkp = clk_get_sys("cpu", "emc");
		if (s->clkp) {
			s->data[0].value =
				clk_get_rate(s->clkp) / 1000;
			clk_put(s->clkp);
		}
		break;

	default:
		pr_err_once("%s: error: invalid power_clk type\n", __func__);
		/*
		 * Bug fix: the original returned here with s->lock still
		 * held, deadlocking the next caller.  Drop the lock before
		 * bailing out.
		 */
		mutex_unlock(&s->lock);
		return;
	}

	mutex_unlock(&s->lock);
	s->counter++;
}
/*
 * Trigger a single on-demand conversion on the OMAP4430 bandgap sensor
 * and return the converted temperature, or -EINVAL on a bad ADC code.
 */
static int omap_read_current_temp(struct omap_temp_sensor *temp_sensor)
{
	int adc, val;

	val = omap_temp_sensor_readl(temp_sensor, TEMP_SENSOR_CTRL_OFFSET);
	val |= OMAP4430_BGAP_TEMP_SENSOR_SOC;

	/* Start the Conversion */
	omap_temp_sensor_writel(temp_sensor, val, TEMP_SENSOR_CTRL_OFFSET);
	/* Wait for end of Conversion
	 * Conversion time is about 11-14 32K cycles (~0.5us)
	 * After some testing, for some reason EOCZ bit (8) is always low
	 * even when no conversion ongoing. Don't check it then ...
	 */
	usleep_range(1000, 1100);

	val = omap_temp_sensor_readl(temp_sensor, TEMP_SENSOR_CTRL_OFFSET);
	val &= ~OMAP4430_BGAP_TEMP_SENSOR_SOC;
	/* Stop the Conversion */
	omap_temp_sensor_writel(temp_sensor, val, TEMP_SENSOR_CTRL_OFFSET);

	adc = omap_temp_sensor_readl(temp_sensor, TEMP_SENSOR_CTRL_OFFSET);

	adc &= OMAP4430_BGAP_TEMP_SENSOR_DTEMP_MASK;

	if (!temp_sensor->is_efuse_valid)
		pr_err_once("%s: Invalid EFUSE, Non-trimmed BGAP,"
			    "Temp not accurate\n", __func__);

	/* NOTE(review): adc is masked above, so it can never be negative;
	 * the lower-bound check is dead but harmless. */
	if (adc < 0 || adc > 128) {
		pr_err("%s:Invalid adc code reported by the sensor %d",
			__func__, adc);
		return -EINVAL;
	}

	return adc_to_temp_conversion(adc);
}
Esempio n. 11
0
File: net.c Progetto: avagin/linux
/*
 * comp_rx_data() - MOST network RX completion: turn an incoming MBO into
 * an sk_buff and hand it to the network stack.
 *
 * For MAMAC channels a synthetic Ethernet header (dest MAC, zero-padded
 * source, ethertype) is rebuilt from the MDP header; for MEP channels the
 * MEP header is simply stripped.  Returns 0 when the MBO was consumed
 * (even if the packet was dropped), -EIO when the MBO does not belong to
 * this channel or fails validation — in that case the MBO is NOT released
 * here (only the net_dev reference is dropped).
 */
static int comp_rx_data(struct mbo *mbo)
{
	const u32 zero = 0;
	struct net_dev_context *nd;
	char *buf = mbo->virt_address;
	u32 len = mbo->processed_length;
	struct sk_buff *skb;
	struct net_device *dev;
	unsigned int skb_len;
	int ret = 0;

	nd = get_net_dev_hold(mbo->ifp);
	if (!nd)
		return -EIO;

	/* The MBO must come from the channel we are receiving on. */
	if (nd->rx.ch_id != mbo->hdm_channel_id) {
		ret = -EIO;
		goto put_nd;
	}

	dev = nd->dev;

	if (nd->is_mamac) {
		if (!pms_is_mamac(buf, len)) {
			ret = -EIO;
			goto put_nd;
		}

		/* Payload minus MDP header, plus room for a rebuilt Ethernet header. */
		skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
	} else {
		if (!PMS_IS_MEP(buf, len)) {
			ret = -EIO;
			goto put_nd;
		}

		skb = dev_alloc_skb(len - MEP_HDR_LEN);
	}

	if (!skb) {
		dev->stats.rx_dropped++;
		pr_err_once("drop packet: no memory for skb\n");
		/* ret stays 0: the MBO is still consumed via "out". */
		goto out;
	}

	skb->dev = dev;

	if (nd->is_mamac) {
		/* dest */
		ether_addr_copy(skb_put(skb, ETH_ALEN), dev->dev_addr);

		/* src */
		skb_put_data(skb, &zero, 4);
		skb_put_data(skb, buf + 5, 2);

		/* eth type */
		skb_put_data(skb, buf + 10, 2);

		buf += MDP_HDR_LEN;
		len -= MDP_HDR_LEN;
	} else {
		buf += MEP_HDR_LEN;
		len -= MEP_HDR_LEN;
	}

	skb_put_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	/* netif_rx() consumes the skb; capture its length first for stats. */
	skb_len = skb->len;
	if (netif_rx(skb) == NET_RX_SUCCESS) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb_len;
	} else {
		dev->stats.rx_dropped++;
	}

out:
	most_put_mbo(mbo);

put_nd:
	dev_put(nd->dev);
	return ret;
}
/**
 * Demux thread function handling data from a specific TSIF.
 *
 * @arg: TSIF number (passed cast to a pointer)
 *
 * Sleeps until data is signalled (or a stop is requested), then drains
 * the TSPP channel under the per-TSIF mutex and feeds the TS packets to
 * the software demux (or the aggregated secure-demux path).
 */
static int mpq_dmx_tspp_thread(void *arg)
{
	int tsif = (int)arg;
	struct mpq_demux *mpq_demux;
	const struct tspp_data_descriptor *tspp_data_desc;
	atomic_t *data_cnt;
	u32 notif_size;
	int channel_id;
	int ref_count;
	int ret;
	int j;

	do {
		ret = wait_event_interruptible(
			mpq_dmx_tspp_info.tsif[tsif].wait_queue,
			atomic_read(&mpq_dmx_tspp_info.tsif[tsif].data_cnt) ||
			kthread_should_stop());

		if ((ret < 0) || kthread_should_stop()) {
			MPQ_DVB_ERR_PRINT("%s: exit\n", __func__);
			break;
		}

		/* Lock against the TSPP filters data-structure */
		if (mutex_lock_interruptible(
			&mpq_dmx_tspp_info.tsif[tsif].mutex))
			return -ERESTARTSYS;

		channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);

		ref_count = mpq_dmx_tspp_info.tsif[tsif].channel_ref;
		data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;

		/* Make sure channel is still active */
		if (ref_count == 0) {
			mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
			continue;
		}

		/* Consume one pending data notification. */
		atomic_dec(data_cnt);

		mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
		mpq_demux->hw_notification_size = 0;

		if (MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC != allocation_mode &&
			mpq_sdmx_is_loaded())
			pr_err_once(
				"%s: TSPP Allocation mode does not support secure demux.\n",
				__func__);

		if (MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC == allocation_mode &&
			mpq_sdmx_is_loaded()) {
			mpq_dmx_tspp_aggregated_process(tsif, channel_id);
		} else {
			/*
			 * Go through all filled descriptors
			 * and perform demuxing on them
			 */
			while ((tspp_data_desc = tspp_get_buffer(0, channel_id))
					!= NULL) {
				notif_size = tspp_data_desc->size /
					TSPP_RAW_TTS_SIZE;
				mpq_demux->hw_notification_size += notif_size;

				for (j = 0; j < notif_size; j++)
					dvb_dmx_swfilter_packet(
					 &mpq_demux->demux,
					 ((u8 *)tspp_data_desc->virt_base) +
					 j * TSPP_RAW_TTS_SIZE,
					 ((u8 *)tspp_data_desc->virt_base) +
					 j * TSPP_RAW_TTS_SIZE + TSPP_RAW_SIZE);
				/*
				 * Notify TSPP that the buffer
				 * is no longer needed
				 */
				tspp_release_buffer(0, channel_id,
					tspp_data_desc->id);
			}
		}

		/* Track the smallest non-zero notification seen (statistics). */
		if (mpq_demux->hw_notification_size &&
			(mpq_demux->hw_notification_size <
			mpq_demux->hw_notification_min_size))
			mpq_demux->hw_notification_min_size =
				mpq_demux->hw_notification_size;

		mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
	} while (1);

	return 0;
}
Esempio n. 13
0
/*
 * get_capabilities() - report profiler capabilities (PMU/PL310 event
 * support, L2 cache support and extra feature flags) to the comm layer.
 *
 * NOTE(review): on an unknown event the function logs once and returns
 * early, leaving the remaining capability fields at whatever values were
 * set so far — confirm this partial fill is acceptable to callers.
 */
static void get_capabilities(struct quadd_comm_cap *cap)
{
	int i, event;
	unsigned int extra = 0;
	struct quadd_events_cap *events_cap = &cap->events_cap;

	cap->pmu = ctx.pmu ? 1 : 0;

	/* L2 support comes from PL310 if present, else from PMU L2 events. */
	cap->l2_cache = 0;
	if (ctx.pl310) {
		cap->l2_cache = 1;
		cap->l2_multiple_events = 0;
	} else if (ctx.pmu) {
		struct source_info *s = &ctx.pmu_info;
		for (i = 0; i < s->nr_supported_events; i++) {
			event = s->supported_events[i];
			if (event == QUADD_EVENT_TYPE_L2_DCACHE_READ_MISSES ||
			    event == QUADD_EVENT_TYPE_L2_DCACHE_WRITE_MISSES ||
			    event == QUADD_EVENT_TYPE_L2_ICACHE_MISSES) {
				cap->l2_cache = 1;
				cap->l2_multiple_events = 1;
				break;
			}
		}
	}

	/* Clear all per-event flags before probing what is supported. */
	events_cap->cpu_cycles = 0;
	events_cap->l1_dcache_read_misses = 0;
	events_cap->l1_dcache_write_misses = 0;
	events_cap->l1_icache_misses = 0;

	events_cap->instructions = 0;
	events_cap->branch_instructions = 0;
	events_cap->branch_misses = 0;
	events_cap->bus_cycles = 0;

	events_cap->l2_dcache_read_misses = 0;
	events_cap->l2_dcache_write_misses = 0;
	events_cap->l2_icache_misses = 0;

	/* PL310 (external L2 cache controller) events. */
	if (ctx.pl310) {
		struct source_info *s = &ctx.pl310_info;
		for (i = 0; i < s->nr_supported_events; i++) {
			int event = s->supported_events[i];

			switch (event) {
			case QUADD_EVENT_TYPE_L2_DCACHE_READ_MISSES:
				events_cap->l2_dcache_read_misses = 1;
				break;
			case QUADD_EVENT_TYPE_L2_DCACHE_WRITE_MISSES:
				events_cap->l2_dcache_write_misses = 1;
				break;
			case QUADD_EVENT_TYPE_L2_ICACHE_MISSES:
				events_cap->l2_icache_misses = 1;
				break;

			default:
				pr_err_once("%s: error: invalid event\n",
					    __func__);
				return;
			}
		}
	}

	/* Core PMU events. */
	if (ctx.pmu) {
		struct source_info *s = &ctx.pmu_info;
		for (i = 0; i < s->nr_supported_events; i++) {
			int event = s->supported_events[i];

			switch (event) {
			case QUADD_EVENT_TYPE_CPU_CYCLES:
				events_cap->cpu_cycles = 1;
				break;
			case QUADD_EVENT_TYPE_INSTRUCTIONS:
				events_cap->instructions = 1;
				break;
			case QUADD_EVENT_TYPE_BRANCH_INSTRUCTIONS:
				events_cap->branch_instructions = 1;
				break;
			case QUADD_EVENT_TYPE_BRANCH_MISSES:
				events_cap->branch_misses = 1;
				break;
			case QUADD_EVENT_TYPE_BUS_CYCLES:
				events_cap->bus_cycles = 1;
				break;

			case QUADD_EVENT_TYPE_L1_DCACHE_READ_MISSES:
				events_cap->l1_dcache_read_misses = 1;
				break;
			case QUADD_EVENT_TYPE_L1_DCACHE_WRITE_MISSES:
				events_cap->l1_dcache_write_misses = 1;
				break;
			case QUADD_EVENT_TYPE_L1_ICACHE_MISSES:
				events_cap->l1_icache_misses = 1;
				break;

			case QUADD_EVENT_TYPE_L2_DCACHE_READ_MISSES:
				events_cap->l2_dcache_read_misses = 1;
				break;
			case QUADD_EVENT_TYPE_L2_DCACHE_WRITE_MISSES:
				events_cap->l2_dcache_write_misses = 1;
				break;
			case QUADD_EVENT_TYPE_L2_ICACHE_MISSES:
				events_cap->l2_icache_misses = 1;
				break;

			default:
				pr_err_once("%s: error: invalid event\n",
					    __func__);
				return;
			}
		}
	}

	cap->tegra_lp_cluster = quadd_is_cpu_with_lp_cluster();
	cap->power_rate = 1;
	cap->blocked_read = 1;

	/* Advertise the extra feature bit-flags supported by this build. */
	extra |= QUADD_COMM_CAP_EXTRA_BT_KERNEL_CTX;
	extra |= QUADD_COMM_CAP_EXTRA_GET_MMAP;
	extra |= QUADD_COMM_CAP_EXTRA_GROUP_SAMPLES;
	extra |= QUADD_COMM_CAP_EXTRA_BT_UNWIND_TABLES;
	extra |= QUADD_COMM_CAP_EXTRA_SUPPORT_AARCH64;
	extra |= QUADD_COMM_CAP_EXTRA_SPECIAL_ARCH_MMAP;
	extra |= QUADD_COMM_CAP_EXTRA_UNWIND_MIXED;
	extra |= QUADD_COMM_CAP_EXTRA_UNW_ENTRY_TYPE;
	extra |= QUADD_COMM_CAP_EXTRA_RB_MMAP_OP;

	if (ctx.hrt->tc)
		extra |= QUADD_COMM_CAP_EXTRA_ARCH_TIMER;

	cap->reserved[QUADD_COMM_CAP_IDX_EXTRA] = extra;
}
/*
 * bq5101xb_worker() - periodic state machine for the BQ5101xB wireless
 * charger: finds the battery power supply, reads temperature / voltage /
 * capacity / status, and drives the charger control pins through the
 * WAIT / WIRED_CONN / RUNNING / OUT_OF_TEMP_* / CHRG_CMPLT states.
 *
 * Reschedules itself (100 ms) until the battery supply appears; returns
 * early (without rescheduling) on any battery property read error.
 */
static void bq5101xb_worker(struct work_struct *work)
{
	int batt_temp;
	int batt_volt;
	int batt_soc;
	int batt_status;
	int powered = 0;
	int wired = 0;
	int i;
	struct delayed_work *dwork;
	struct bq5101xb_chip *chip;
	struct bq5101xb_charger_platform_data *pdata;
	struct blocking_notifier_head *ntfy_hd;
	struct notifier_block *batt_ntfy;

	dwork = to_delayed_work(work);
	chip = container_of(dwork, struct bq5101xb_chip, bq5101xb_work);
	pdata = chip->dev->platform_data;

	/* First pass: locate the battery supply and register for updates. */
	if (!chip->batt_psy) {
		for (i = 0; i < pdata->num_supplies; i++) {
			chip->batt_psy =
				power_supply_get_by_name(pdata->supply_list[i]);

			if (!chip->batt_psy) {
				pr_err_once("Batt PSY Not Found\n");
				continue;
			}

			/* Confirm a batt psy */
			if (chip->batt_psy->type != POWER_SUPPLY_TYPE_BATTERY)
				chip->batt_psy = NULL;

			if (chip->batt_psy) {
				ntfy_hd = &chip->batt_psy->notify_head;
				batt_ntfy = &chip->psy_notifier;
				blocking_notifier_chain_register(ntfy_hd,
								 batt_ntfy);
				break;
			}
		}
	}

	if (chip->batt_psy) {
		/* Sense charging power, via callback or the CHRG_B gpio. */
		if (pdata->check_powered)
			powered = pdata->check_powered();
		else if (pdata->chrg_b_pin > 0)
			powered = !gpio_get_value(pdata->chrg_b_pin);

		if (pdata->check_wired)
			wired = pdata->check_wired();

		if (bq5101xb_get_batt_info(chip->batt_psy,
					   POWER_SUPPLY_PROP_TEMP,
					   &batt_temp)) {
			dev_err(chip->dev, "Error Reading Temperature\n");
			return;
		}
		/* Convert Units to Celsius */
		batt_temp /= 10;

		if (bq5101xb_get_batt_info(chip->batt_psy,
					   POWER_SUPPLY_PROP_VOLTAGE_NOW,
					   &batt_volt)) {
			dev_err(chip->dev, "Error Reading Voltage\n");
			return;
		}

		if (bq5101xb_get_batt_info(chip->batt_psy,
					   POWER_SUPPLY_PROP_CAPACITY,
					   &batt_soc)) {
			dev_err(chip->dev, "Error Reading Capacity\n");
			return;
		}
		if (bq5101xb_get_batt_info(chip->batt_psy,
					   POWER_SUPPLY_PROP_STATUS,
					   &batt_status)) {
			dev_err(chip->dev, "Error Reading Status\n");
			return;
		}
	} else {
		/* No battery supply yet: retry shortly. */
		pr_err_once("batt_psy not found\n");
		schedule_delayed_work(&chip->bq5101xb_work,
				      msecs_to_jiffies(100));
		return;
	}

	dev_dbg(chip->dev, "State Before = %d\n", chip->state);

	switch (chip->state) {
	case BQ5101XB_WAIT:
		/* Idle: wired input takes priority, then wireless power. */
		if (wired && (pdata->priority == BQ5101XB_WIRED)) {
			bq5101xb_set_pins(pdata, 1, 1, 0, 0);
			chip->state = BQ5101XB_WIRED_CONN;
		} else if (powered) {
			chip->state = BQ5101XB_RUNNING;
		} else if (batt_temp >= pdata->hot_temp) {
			bq5101xb_set_pins(pdata, 0, 0, 1, 1);
			chip->state = BQ5101XB_OUT_OF_TEMP_HOT;
		} else if (batt_temp <= pdata->cold_temp) {
			bq5101xb_set_pins(pdata, 0, 0, 1, 1);
			chip->state = BQ5101XB_OUT_OF_TEMP_COLD;
		}
		break;
	case BQ5101XB_WIRED_CONN:
		if (!wired) {
			bq5101xb_set_pins(pdata, 0, 0, 1, 0);
			chip->state = BQ5101XB_WAIT;
		}
		break;
	case BQ5101XB_RUNNING:
		/* Charging wirelessly: watch for wired, loss of power,
		 * temperature faults and full charge. */
		if (wired && (pdata->priority == BQ5101XB_WIRED)) {
			bq5101xb_set_pins(pdata, 1, 1, 0, 0);
			chip->state = BQ5101XB_WIRED_CONN;
		} else if (!powered) {
			bq5101xb_set_pins(pdata, 0, 0, 1, 0);
			chip->state = BQ5101XB_WAIT;
		} else if (batt_temp >= pdata->hot_temp) {
			bq5101xb_set_pins(pdata, 0, 0, 1, 1);
			chip->state = BQ5101XB_OUT_OF_TEMP_HOT;
		} else if (batt_temp <= pdata->cold_temp) {
			bq5101xb_set_pins(pdata, 0, 0, 1, 1);
			chip->state = BQ5101XB_OUT_OF_TEMP_COLD;
		} else if ((batt_soc >= BQ5101XB_CHRG_CMPLT_SOC) &&
			   (batt_status == POWER_SUPPLY_STATUS_FULL)) {
			bq5101xb_set_pins(pdata, 1, 1, 0, 0);
			chip->state = BQ5101XB_CHRG_CMPLT;
		}
		break;
	case BQ5101XB_OUT_OF_TEMP_HOT:
		/* Leave the fault only after hysteresis is satisfied. */
		if (wired && (pdata->priority == BQ5101XB_WIRED)) {
			bq5101xb_set_pins(pdata, 1, 1, 0, 0);
			chip->state = BQ5101XB_WIRED_CONN;
		} else if (batt_temp < (pdata->hot_temp -
					BQ5101XB_TEMP_HYS)) {
			if ((batt_soc >= BQ5101XB_CHRG_CMPLT_SOC) &&
			    (batt_status == POWER_SUPPLY_STATUS_FULL)) {
				bq5101xb_set_pins(pdata, 1, 1, 0, 0);
				chip->state = BQ5101XB_CHRG_CMPLT;
			} else {
				bq5101xb_set_pins(pdata, 0, 0, 1, 0);
				chip->state = BQ5101XB_WAIT;
			}
		}
		break;
	case BQ5101XB_OUT_OF_TEMP_COLD:
		if (wired && (pdata->priority == BQ5101XB_WIRED)) {
			bq5101xb_set_pins(pdata, 1, 1, 0, 0);
			chip->state = BQ5101XB_WIRED_CONN;
		} else if (batt_temp > (pdata->cold_temp +
					BQ5101XB_TEMP_HYS)) {
			if ((batt_soc >= BQ5101XB_CHRG_CMPLT_SOC) &&
			    (batt_status == POWER_SUPPLY_STATUS_FULL)) {
				bq5101xb_set_pins(pdata, 1, 1, 0, 0);
				chip->state = BQ5101XB_CHRG_CMPLT;
			} else {
				bq5101xb_set_pins(pdata, 0, 0, 1, 0);
				chip->state = BQ5101XB_WAIT;
			}
		}
		break;
	case BQ5101XB_CHRG_CMPLT:
		/* Resume charging when SOC or voltage drops far enough. */
		if (wired && (pdata->priority == BQ5101XB_WIRED)) {
			bq5101xb_set_pins(pdata, 1, 1, 0, 0);
			chip->state = BQ5101XB_WIRED_CONN;
		} else if (batt_temp >= pdata->hot_temp) {
			bq5101xb_set_pins(pdata, 0, 0, 1, 1);
			chip->state = BQ5101XB_OUT_OF_TEMP_HOT;
		} else if (batt_temp <= pdata->cold_temp) {
			bq5101xb_set_pins(pdata, 0, 0, 1, 1);
			chip->state = BQ5101XB_OUT_OF_TEMP_COLD;
		} else if ((batt_soc <= pdata->resume_soc) ||
			   (batt_volt <= pdata->resume_vbatt)){
			bq5101xb_set_pins(pdata, 0, 0, 1, 0);
			chip->state = BQ5101XB_WAIT;
		}
		break;
	}

	dev_dbg(chip->dev, "State After = %d\n", chip->state);

	return;
}
/*
 * Demux thread for a specific TSIF: waits for data notifications, then
 * drains the TSPP channel under the per-TSIF mutex and hands each
 * descriptor to the software demux (or the aggregated secure-demux path).
 */
static int mpq_dmx_tspp_thread(void *arg)
{
	int tsif = (int)arg;
	struct mpq_demux *mpq_demux;
	const struct tspp_data_descriptor *tspp_data_desc;
	atomic_t *data_cnt;
	u32 notif_size;
	int channel_id;
	int ref_count;
	int ret;

	do {
		ret = wait_event_interruptible(
			mpq_dmx_tspp_info.tsif[tsif].wait_queue,
			atomic_read(&mpq_dmx_tspp_info.tsif[tsif].data_cnt) ||
			kthread_should_stop());

		if ((ret < 0) || kthread_should_stop()) {
			MPQ_DVB_ERR_PRINT("%s: exit\n", __func__);
			break;
		}

		/* Lock against the TSPP filters data-structure */
		if (mutex_lock_interruptible(
			&mpq_dmx_tspp_info.tsif[tsif].mutex))
			return -ERESTARTSYS;

		channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);

		ref_count = mpq_dmx_tspp_info.tsif[tsif].channel_ref;
		data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;

		/* Make sure the channel is still active */
		if (ref_count == 0) {
			mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
			continue;
		}

		atomic_dec(data_cnt);

		mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
		mpq_demux->hw_notification_size = 0;

		if (MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC != allocation_mode &&
			mpq_sdmx_is_loaded())
			pr_err_once(
				"%s: TSPP Allocation mode does not support secure demux.\n",
				__func__);

		if (MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC == allocation_mode &&
			mpq_sdmx_is_loaded()) {
			mpq_dmx_tspp_aggregated_process(tsif, channel_id);
		} else {
			/* Drain all filled descriptors and demux them. */
			while ((tspp_data_desc = tspp_get_buffer(0, channel_id))
					!= NULL) {
				notif_size = tspp_data_desc->size /
					TSPP_RAW_TTS_SIZE;
				mpq_demux->hw_notification_size += notif_size;

				mpq_dmx_tspp_swfilter_desc(mpq_demux,
					tspp_data_desc);
				/* Return the buffer to TSPP once consumed. */
				tspp_release_buffer(0, channel_id,
					tspp_data_desc->id);
			}
		}

		/* Track the smallest non-zero notification seen (statistics). */
		if (mpq_demux->hw_notification_size &&
			(mpq_demux->hw_notification_size <
			mpq_demux->hw_notification_min_size))
			mpq_demux->hw_notification_min_size =
				mpq_demux->hw_notification_size;

		mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
	} while (1);

	return 0;
}
Esempio n. 16
0
/*
 * Encode ALLOCATE request
 *
 * COMPOUND: SEQUENCE, PUTFH, ALLOCATE, GETFATTR
 */
static void nfs4_xdr_enc_allocate(struct rpc_rqst *req,
				  struct xdr_stream *xdr,
				  struct nfs42_falloc_args *args)
{
	struct compound_hdr hdr = {
		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
	};

	encode_compound_hdr(xdr, req, &hdr);
	encode_sequence(xdr, &args->seq_args, &hdr);
	encode_putfh(xdr, args->falloc_fh, &hdr);
	encode_allocate(xdr, args, &hdr);
	/* Fetch post-op attributes so the inode cache can be refreshed. */
	encode_getfattr(xdr, args->falloc_bitmask, &hdr);
	encode_nops(&hdr);
}

/*
 * Encode COPY request
 *
 * COMPOUND: SEQUENCE, PUTFH(src), SAVEFH, PUTFH(dst), COPY
 */
static void nfs4_xdr_enc_copy(struct rpc_rqst *req,
			      struct xdr_stream *xdr,
			      struct nfs42_copy_args *args)
{
	struct compound_hdr hdr = {
		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
	};

	encode_compound_hdr(xdr, req, &hdr);
	encode_sequence(xdr, &args->seq_args, &hdr);
	encode_putfh(xdr, args->src_fh, &hdr);
	/* SAVEFH keeps the source fh while PUTFH switches to the dest. */
	encode_savefh(xdr, &hdr);
	encode_putfh(xdr, args->dst_fh, &hdr);
	encode_copy(xdr, args, &hdr);
	encode_nops(&hdr);
}

/*
 * Encode DEALLOCATE request
 *
 * COMPOUND: SEQUENCE, PUTFH, DEALLOCATE, GETFATTR
 */
static void nfs4_xdr_enc_deallocate(struct rpc_rqst *req,
				    struct xdr_stream *xdr,
				    struct nfs42_falloc_args *args)
{
	struct compound_hdr hdr = {
		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
	};

	encode_compound_hdr(xdr, req, &hdr);
	encode_sequence(xdr, &args->seq_args, &hdr);
	encode_putfh(xdr, args->falloc_fh, &hdr);
	encode_deallocate(xdr, args, &hdr);
	/* Fetch post-op attributes so the inode cache can be refreshed. */
	encode_getfattr(xdr, args->falloc_bitmask, &hdr);
	encode_nops(&hdr);
}

/*
 * Encode SEEK request
 *
 * COMPOUND: SEQUENCE, PUTFH, SEEK
 */
static void nfs4_xdr_enc_seek(struct rpc_rqst *req,
			      struct xdr_stream *xdr,
			      struct nfs42_seek_args *args)
{
	struct compound_hdr hdr = {
		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
	};

	encode_compound_hdr(xdr, req, &hdr);
	encode_sequence(xdr, &args->seq_args, &hdr);
	encode_putfh(xdr, args->sa_fh, &hdr);
	encode_seek(xdr, args, &hdr);
	encode_nops(&hdr);
}

/*
 * Encode LAYOUTSTATS request
 *
 * COMPOUND: SEQUENCE, PUTFH, LAYOUTSTATS (one op per device)
 */
static void nfs4_xdr_enc_layoutstats(struct rpc_rqst *req,
				     struct xdr_stream *xdr,
				     struct nfs42_layoutstat_args *args)
{
	int i;

	struct compound_hdr hdr = {
		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
	};

	encode_compound_hdr(xdr, req, &hdr);
	encode_sequence(xdr, &args->seq_args, &hdr);
	encode_putfh(xdr, args->fh, &hdr);
	/* Caller must never hand us more devices than the protocol cap. */
	WARN_ON(args->num_dev > PNFS_LAYOUTSTATS_MAXDEV);
	for (i = 0; i < args->num_dev; i++)
		encode_layoutstats(xdr, args, &args->devinfo[i], &hdr);
	encode_nops(&hdr);
}

/*
 * Encode CLONE request
 *
 * COMPOUND: SEQUENCE, PUTFH(src), SAVEFH, PUTFH(dst), CLONE, GETFATTR
 */
static void nfs4_xdr_enc_clone(struct rpc_rqst *req,
			       struct xdr_stream *xdr,
			       struct nfs42_clone_args *args)
{
	struct compound_hdr hdr = {
		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
	};

	encode_compound_hdr(xdr, req, &hdr);
	encode_sequence(xdr, &args->seq_args, &hdr);
	encode_putfh(xdr, args->src_fh, &hdr);
	/* SAVEFH keeps the source fh while PUTFH switches to the dest. */
	encode_savefh(xdr, &hdr);
	encode_putfh(xdr, args->dst_fh, &hdr);
	encode_clone(xdr, args, &hdr);
	encode_getfattr(xdr, args->dst_bitmask, &hdr);
	encode_nops(&hdr);
}

/* Decode the ALLOCATE op status from the compound reply. */
static int decode_allocate(struct xdr_stream *xdr, struct nfs42_falloc_res *res)
{
	return decode_op_hdr(xdr, OP_ALLOCATE);
}

/*
 * Decode a write_response4: the stateid list must be empty (we never
 * request asynchronous copies), then the byte count and write verifier.
 */
static int decode_write_response(struct xdr_stream *xdr,
				 struct nfs42_write_res *res)
{
	__be32 *p;

	/* stateid list length (4) + count (8) + committed (4) */
	p = xdr_inline_decode(xdr, 4 + 8 + 4);
	if (unlikely(!p))
		goto out_overflow;

	/*
	 * We never use asynchronous mode, so warn if a server returns
	 * a stateid.
	 */
	if (unlikely(*p != 0)) {
		pr_err_once("%s: server has set unrequested "
				"asynchronous mode\n", __func__);
		return -EREMOTEIO;
	}
	p++;
	p = xdr_decode_hyper(p, &res->count);
	res->verifier.committed = be32_to_cpup(p);
	return decode_verifier(xdr, &res->verifier.verifier);

out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

/* Decode the consecutive/synchronous copy requirement flags. */
static int decode_copy_requirements(struct xdr_stream *xdr,
				    struct nfs42_copy_res *res)
{
	__be32 *ptr = xdr_inline_decode(xdr, 4 + 4);

	if (unlikely(!ptr)) {
		print_overflow_msg(__func__, xdr);
		return -EIO;
	}

	res->consecutive = be32_to_cpup(ptr++);
	res->synchronous = be32_to_cpup(ptr++);
	return 0;
}

/*
 * Decode the COPY op: on NFS4ERR_OFFLOAD_NO_REQS the server only sends
 * back its supported requirements; otherwise a write response plus the
 * requirements follow.
 */
static int decode_copy(struct xdr_stream *xdr, struct nfs42_copy_res *res)
{
	int status = decode_op_hdr(xdr, OP_COPY);

	if (status == NFS4ERR_OFFLOAD_NO_REQS) {
		/* Server rejected our requirements: decode what it supports. */
		status = decode_copy_requirements(xdr, res);
		return status ? status : NFS4ERR_OFFLOAD_NO_REQS;
	}
	if (status)
		return status;

	status = decode_write_response(xdr, &res->write_res);
	if (status)
		return status;

	return decode_copy_requirements(xdr, res);
}

/* Decode the DEALLOCATE op status from the compound reply. */
static int decode_deallocate(struct xdr_stream *xdr, struct nfs42_falloc_res *res)
{
	return decode_op_hdr(xdr, OP_DEALLOCATE);
}

/* Decode the SEEK op: eof flag followed by the resulting offset. */
static int decode_seek(struct xdr_stream *xdr, struct nfs42_seek_res *res)
{
	__be32 *p;
	int status = decode_op_hdr(xdr, OP_SEEK);

	if (status)
		return status;

	/* eof flag (4 bytes) + offset (8 bytes) */
	p = xdr_inline_decode(xdr, 4 + 8);
	if (unlikely(!p)) {
		print_overflow_msg(__func__, xdr);
		return -EIO;
	}

	res->sr_eof = be32_to_cpup(p++);
	xdr_decode_hyper(p, &res->sr_offset);
	return 0;
}

/* Decode the LAYOUTSTATS op status from the compound reply. */
static int decode_layoutstats(struct xdr_stream *xdr)
{
	return decode_op_hdr(xdr, OP_LAYOUTSTATS);
}

/* Decode the CLONE op status from the compound reply. */
static int decode_clone(struct xdr_stream *xdr)
{
	return decode_op_hdr(xdr, OP_CLONE);
}

/*
 * Decode ALLOCATE response
 */
static int nfs4_xdr_dec_allocate(struct rpc_rqst *rqstp,
				 struct xdr_stream *xdr,
				 struct nfs42_falloc_res *res)
{
	struct compound_hdr hdr;
	int status;

	/* Decode the COMPOUND ops in the order they were encoded. */
	status = decode_compound_hdr(xdr, &hdr);
	if (status)
		goto out;
	status = decode_sequence(xdr, &res->seq_res, rqstp);
	if (status)
		goto out;
	status = decode_putfh(xdr);
	if (status)
		goto out;
	status = decode_allocate(xdr, res);
	if (status)
		goto out;
	/* getfattr result is ignored: ALLOCATE itself already succeeded. */
	decode_getfattr(xdr, res->falloc_fattr, res->falloc_server);
out:
	return status;
}

/*
 * Decode COPY response
 */
static int nfs4_xdr_dec_copy(struct rpc_rqst *rqstp,
			     struct xdr_stream *xdr,
			     struct nfs42_copy_res *res)
{
	struct compound_hdr hdr;
	int status;

	/* Decode the COMPOUND ops in the order they were encoded. */
	status = decode_compound_hdr(xdr, &hdr);
	if (status)
		goto out;
	status = decode_sequence(xdr, &res->seq_res, rqstp);
	if (status)
		goto out;
	status = decode_putfh(xdr);
	if (status)
		goto out;
	status = decode_savefh(xdr);
	if (status)
		goto out;
	status = decode_putfh(xdr);
	if (status)
		goto out;
	status = decode_copy(xdr, res);
out:
	return status;
}

/*
 * Decode DEALLOCATE response
 */
static int nfs4_xdr_dec_deallocate(struct rpc_rqst *rqstp,
				   struct xdr_stream *xdr,
				   struct nfs42_falloc_res *res)
{
	struct compound_hdr hdr;
	int status;

	/* Decode the COMPOUND ops in the order they were encoded. */
	status = decode_compound_hdr(xdr, &hdr);
	if (status)
		goto out;
	status = decode_sequence(xdr, &res->seq_res, rqstp);
	if (status)
		goto out;
	status = decode_putfh(xdr);
	if (status)
		goto out;
	status = decode_deallocate(xdr, res);
	if (status)
		goto out;
	/* getfattr result is ignored: DEALLOCATE itself already succeeded. */
	decode_getfattr(xdr, res->falloc_fattr, res->falloc_server);
out:
	return status;
}

/*
 * Decode SEEK response
 */
static int nfs4_xdr_dec_seek(struct rpc_rqst *rqstp,
			     struct xdr_stream *xdr,
			     struct nfs42_seek_res *res)
{
	struct compound_hdr hdr;
	int status;

	/* Decode the COMPOUND ops in the order they were encoded. */
	status = decode_compound_hdr(xdr, &hdr);
	if (status)
		goto out;
	status = decode_sequence(xdr, &res->seq_res, rqstp);
	if (status)
		goto out;
	status = decode_putfh(xdr);
	if (status)
		goto out;
	status = decode_seek(xdr, res);
out:
	return status;
}

/*
 * Decode LAYOUTSTATS response
 */
static int nfs4_xdr_dec_layoutstats(struct rpc_rqst *rqstp,
				    struct xdr_stream *xdr,
				    struct nfs42_layoutstat_res *res)
{
	struct compound_hdr hdr;
	int status, i;

	/* Decode the COMPOUND ops in the order they were encoded. */
	status = decode_compound_hdr(xdr, &hdr);
	if (status)
		goto out;
	status = decode_sequence(xdr, &res->seq_res, rqstp);
	if (status)
		goto out;
	status = decode_putfh(xdr);
	if (status)
		goto out;
	/* One LAYOUTSTATS op was encoded per device; decode them all. */
	WARN_ON(res->num_dev > PNFS_LAYOUTSTATS_MAXDEV);
	for (i = 0; i < res->num_dev; i++) {
		status = decode_layoutstats(xdr);
		if (status)
			goto out;
	}
out:
	res->rpc_status = status;
	return status;
}

/*
 * Decode CLONE response
 */
static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
			      struct xdr_stream *xdr,
			      struct nfs42_clone_res *res)
{
	struct compound_hdr hdr;
	int status;

	/* Decode the COMPOUND ops in the order they were encoded. */
	status = decode_compound_hdr(xdr, &hdr);
	if (status)
		goto out;
	status = decode_sequence(xdr, &res->seq_res, rqstp);
	if (status)
		goto out;
	status = decode_putfh(xdr);
	if (status)
		goto out;
	status = decode_savefh(xdr);
	if (status)
		goto out;
	status = decode_putfh(xdr);
	if (status)
		goto out;
	status = decode_clone(xdr);
	if (status)
		goto out;
	status = decode_getfattr(xdr, res->dst_fattr, res->server);

out:
	res->rpc_status = status;
	return status;
}