Пример #1
0
/*
 * omap3_clk_init - register OMAP3430 on-chip clocks and off-mode timers.
 *
 * Walks the onchip_clks table and registers every clock whose flags
 * mark it as present on OMAP343x.  When CONFIG_OMAP34XX_OFFMODE is
 * enabled, also initializes the core/per power-domain deferrable
 * timers and their callbacks.
 *
 * Returns 0.
 *
 * Fix: the original loop ended in a redundant `continue` guarded by
 * the same condition as clk_register() — dead control flow removed.
 */
int __init omap3_clk_init(void)
{
	struct clk **clkp;

	clk_init(&omap3_clk_functions);

	for (clkp = onchip_clks; clkp < onchip_clks + ARRAY_SIZE(onchip_clks);
	     clkp++) {
		/* Only clocks present on OMAP343x are registered. */
		if ((*clkp)->flags & CLOCK_IN_OMAP343X)
			clk_register(*clkp);
	}

#ifdef CONFIG_OMAP34XX_OFFMODE
	spin_lock_init(&inatimer_lock);
	init_timer_deferrable(&coredomain_timer);
	init_timer_deferrable(&perdomain_timer);

	coredomain_timer.function = coredomain_timer_func;
	perdomain_timer.function = perdomain_timer_func;
#endif /* #ifdef CONFIG_OMAP34XX_OFFMODE */

	return 0;
}
Пример #2
0
/*
 * sfq_init - initialize an SFQ qdisc instance.
 * @sch: qdisc being set up
 * @opt: optional netlink configuration attributes (may be NULL)
 *
 * Sets up the deferrable perturbation timer, marks every hash bucket
 * empty (SFQ_DEPTH sentinel), initializes the per-slot queues and the
 * depth lists, then either applies defaults (opt == NULL) or the
 * supplied configuration via sfq_change(), and finally links all
 * slots into the free list.
 *
 * Returns 0 on success or a negative errno from sfq_change().
 *
 * Fix: removed a stray double semicolon after the timer data
 * assignment.
 */
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	/* SFQ_DEPTH in a bucket means "no slot assigned". */
	for (i = 0; i < SFQ_HASH_DIVISOR; i++)
		q->ht[i] = SFQ_DEPTH;

	for (i = 0; i < SFQ_DEPTH; i++) {
		skb_queue_head_init(&q->qs[i]);
		q->dep[i + SFQ_DEPTH].next = i + SFQ_DEPTH;
		q->dep[i + SFQ_DEPTH].prev = i + SFQ_DEPTH;
	}

	q->limit = SFQ_DEPTH - 1;
	q->max_depth = 0;
	q->tail = SFQ_DEPTH;
	if (opt == NULL) {
		q->quantum = psched_mtu(sch->dev);
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	for (i = 0; i < SFQ_DEPTH; i++)
		sfq_link(q, i);
	return 0;
}
Пример #3
0
/*
 * cpufreq_interactivex_init - governor module initialization.
 *
 * Sets the tunable defaults, arms one deferrable timer per possible
 * CPU, creates the scale-up/scale-down workqueues, prepares the
 * frequency-change work item and registers the interactiveX governor.
 */
static int __init cpufreq_interactivex_init(void)
{
	struct timer_list *timer;
	unsigned int cpu;

	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	resum_speed = RESUME_SPEED;
	freq_threshld = FREQ_THRESHOLD;

	/* One deferrable per-CPU timer drives the governor sampling. */
	for_each_possible_cpu(cpu) {
		timer = &per_cpu(cpu_timer, cpu);
		init_timer_deferrable(timer);
		timer->function = cpufreq_interactivex_timer;
		timer->data = cpu;
	}

	/* Scale up is high priority */
	up_wq = create_workqueue("kinteractive_up");
	down_wq = create_workqueue("knteractive_down");

	INIT_WORK(&freq_scale_work, cpufreq_interactivex_freq_change_time_work);

	pr_info("[imoseyon] interactiveX enter\n");

	return cpufreq_register_governor(&cpufreq_gov_interactivex);
}
/*
 * cpufreq_interactive_init - governor module initialization.
 *
 * Resolves the (unexported) nr_running symbol via kallsyms, arms one
 * deferrable timer per possible CPU, creates the workqueues and
 * registers the interactive governor.  Fails with -1 if the symbol
 * lookup comes back empty.
 */
static int __init cpufreq_interactive_init(void)
{
	struct timer_list *timer;
	unsigned int cpu;

	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

	/* Resolve the address of nr_running at load time. */
	nr_running_addr = (nr_running_type)kallsyms_lookup_name("nr_running");
	if (!nr_running_addr)
		return -1;

	/* One deferrable per-CPU timer drives the governor sampling. */
	for_each_possible_cpu(cpu) {
		timer = &per_cpu(cpu_timer, cpu);
		init_timer_deferrable(timer);
		timer->function = cpufreq_interactive_timer;
		timer->data = cpu;
	}

	/* Scale up is high priority */
	up_wq = create_rt_workqueue("kinteractive_up");
	down_wq = create_workqueue("knteractive_down");

	INIT_WORK(&freq_scale_work, cpufreq_interactive_freq_change_time_work);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}
Пример #5
0
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
		q->dep[i].next = i + SFQ_MAX_FLOWS;
		q->dep[i].prev = i + SFQ_MAX_FLOWS;
	}

	q->limit = SFQ_MAX_DEPTH;
	q->maxdepth = SFQ_MAX_DEPTH;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	q->maxflows = SFQ_DEFAULT_FLOWS;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = 0;
	q->perturbation = prandom_u32();

	if (opt) {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
	if (!q->ht || !q->slots) {
		sfq_destroy(sch);
		return -ENOMEM;
	}
	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < q->maxflows; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
Пример #6
0
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	size_t sz;
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_DEPTH; i++) {
		q->dep[i].next = i + SFQ_SLOTS;
		q->dep[i].prev = i + SFQ_SLOTS;
	}

	q->limit = SFQ_DEPTH - 1;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	if (opt == NULL) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	sz = sizeof(q->ht[0]) * q->divisor;
	q->ht = kmalloc(sz, GFP_KERNEL);
	if (!q->ht && sz > PAGE_SIZE)
		q->ht = vmalloc(sz);
	if (!q->ht)
		return -ENOMEM;
	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < SFQ_SLOTS; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
Пример #7
0
/*
 * ath6kl_recovery_init - set up firmware-recovery state for @ar.
 *
 * Clears any pending cleanup flag, prepares the recovery work item,
 * resets the heartbeat sequence/miss counters and initializes the
 * deferrable heartbeat timer.  If heartbeat polling is configured
 * (hb_poll non-zero), the timer is armed immediately.
 *
 * Fix: the original mixed the local @recovery pointer with direct
 * ar->fw_recovery accesses for the same structure — unified on
 * @recovery for consistency (no behavior change).
 */
void ath6kl_recovery_init(struct ath6kl *ar)
{
	struct ath6kl_fw_recovery *recovery = &ar->fw_recovery;

	clear_bit(RECOVERY_CLEANUP, &ar->flag);
	INIT_WORK(&recovery->recovery_work, ath6kl_recovery_work);
	recovery->seq_num = 0;
	recovery->hb_misscnt = 0;
	recovery->hb_pending = false;
	recovery->hb_timer.function = ath6kl_recovery_hb_timer;
	recovery->hb_timer.data = (unsigned long) ar;
	init_timer_deferrable(&recovery->hb_timer);

	if (recovery->hb_poll)
		mod_timer(&recovery->hb_timer, jiffies +
			  msecs_to_jiffies(recovery->hb_poll));
}
/*
 * cpufreq_interactive_init - governor module initialization.
 *
 * Sets the minimum sample time, arms one deferrable timer per
 * possible CPU, creates the high-priority scale-up workqueue and the
 * scale-down workqueue, prepares the frequency-change work item and
 * registers the interactive governor.
 */
static int __init cpufreq_interactive_init(void)
{
	struct timer_list *timer;
	unsigned int cpu;

	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

	/* One deferrable per-CPU timer drives the governor sampling. */
	for_each_possible_cpu(cpu) {
		timer = &per_cpu(cpu_timer, cpu);
		init_timer_deferrable(timer);
		timer->function = cpufreq_interactive_timer;
		timer->data = cpu;
	}

	/* Scale up is high priority */
	up_wq = alloc_workqueue("kinteractive_up", WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
	down_wq = create_workqueue("knteractive_down");

	INIT_WORK(&freq_scale_work, cpufreq_interactive_freq_change_time_work);

	pr_info("[imoseyon] interactive enter\n");

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}
Пример #9
0
/*
 * sfq_init - set up an SFQ qdisc instance.
 * @sch: qdisc being initialized
 * @opt: optional netlink configuration (may be NULL)
 *
 * Prepares the deferrable perturbation timer, empties the hash
 * buckets and depth lists, applies defaults or the supplied
 * configuration, and links every slot into the free list.
 *
 * Returns 0 on success or a negative errno from sfq_change().
 */
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int idx;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	/* No bucket has a slot assigned yet. */
	for (idx = 0; idx < SFQ_HASH_DIVISOR; idx++)
		q->ht[idx] = SFQ_EMPTY_SLOT;

	/* Depth lists start out self-linked, i.e. empty. */
	for (idx = 0; idx < SFQ_DEPTH; idx++) {
		q->dep[idx].next = idx + SFQ_SLOTS;
		q->dep[idx].prev = idx + SFQ_SLOTS;
	}

	q->limit = SFQ_DEPTH - 1;
	q->cur_depth = 0;
	q->tail = NULL;

	if (!opt) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);

		if (err)
			return err;
	}

	for (idx = 0; idx < SFQ_SLOTS; idx++) {
		slot_queue_init(&q->slots[idx]);
		sfq_link(q, idx);
	}

	return 0;
}
Пример #10
0
/*
 * ieee80211_start_tx_ba_session - request a TX Block Ack (A-MPDU) session.
 * @pubsta: station to aggregate to
 * @tid: traffic identifier for the session
 * @timeout: session timeout carried in the tid_ampdu_tx state
 *
 * Validates driver/hardware support and interface type, then — under
 * sta->lock — checks retry limits and that no session already exists
 * for @tid, allocates the tid_ampdu_tx state and hands it to the
 * aggregation work item, which performs the actual ADDBA exchange.
 *
 * Returns 0 on success; -EINVAL on unsupported configurations,
 * -EBUSY when retries are exhausted or rate-limited, -EAGAIN when a
 * session is already pending, -ENOMEM on allocation failure.
 */
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
                                  u16 timeout)
{
    struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
    struct ieee80211_sub_if_data *sdata = sta->sdata;
    struct ieee80211_local *local = sdata->local;
    struct tid_ampdu_tx *tid_tx;
    int ret = 0;

    trace_api_start_tx_ba_session(pubsta, tid);

    /* Aggregation is impossible without a driver ampdu_action hook. */
    if (WARN_ON(!local->ops->ampdu_action))
        return -EINVAL;

    if ((tid >= STA_TID_NUM) ||
            !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
            (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
        return -EINVAL;

#ifdef CONFIG_MAC80211_HT_DEBUG
    printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
           pubsta->addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

    /* Only these interface types may initiate TX aggregation. */
    if (sdata->vif.type != NL80211_IFTYPE_STATION &&
            sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
            sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
            sdata->vif.type != NL80211_IFTYPE_AP &&
            sdata->vif.type != NL80211_IFTYPE_ADHOC)
        return -EINVAL;

    if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "BA sessions blocked. "
               "Denying BA session request\n");
#endif
        return -EINVAL;
    }

    /*
     * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
     * member of an IBSS, and has no other existing Block Ack agreement
     * with the recipient STA, then the initiating STA shall transmit a
     * Probe Request frame to the recipient STA and shall not transmit an
     * ADDBA Request frame unless it receives a Probe Response frame
     * from the recipient within dot11ADDBAFailureTimeout.
     *
     * The probe request mechanism for ADDBA is currently not implemented,
     * but we only build up Block Ack session with HT STAs. This information
     * is set when we receive a bss info from a probe response or a beacon.
     */
    if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
            !sta->sta.ht_cap.ht_supported) {
#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "BA request denied - IBSS STA %pM"
               "does not advertise HT support\n", pubsta->addr);
#endif /* CONFIG_MAC80211_HT_DEBUG */
        return -EINVAL;
    }

    /* sta->lock serializes the per-TID aggregation state below. */
    spin_lock_bh(&sta->lock);

    /* we have tried too many times, receiver does not want A-MPDU */
    if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
        ret = -EBUSY;
        goto err_unlock_sta;
    }

    /*
     * if we have tried more than HT_AGG_BURST_RETRIES times we
     * will spread our requests in time to avoid stalling connection
     * for too long
     */
    if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
            time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
                        HT_AGG_RETRIES_PERIOD)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "BA request denied - "
               "waiting a grace period after %d failed requests "
               "on tid %u\n",
               sta->ampdu_mlme.addba_req_num[tid], tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
        ret = -EBUSY;
        goto err_unlock_sta;
    }

    tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
    /* check if the TID is not in aggregation flow already */
    if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "BA request denied - session is not "
               "idle on tid %u\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
        ret = -EAGAIN;
        goto err_unlock_sta;
    }

    /* prepare A-MPDU MLME for Tx aggregation */
    /* GFP_ATOMIC: allocated while holding sta->lock (BH-disabled). */
    tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
    if (!tid_tx) {
        ret = -ENOMEM;
        goto err_unlock_sta;
    }

    skb_queue_head_init(&tid_tx->pending);
    __set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

    tid_tx->timeout = timeout;

    /* response timer */
    tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
    tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
    init_timer(&tid_tx->addba_resp_timer);

    /* tx timer */
    tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
    tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
    init_timer_deferrable(&tid_tx->session_timer);

    /* assign a dialog token */
    sta->ampdu_mlme.dialog_token_allocator++;
    tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

    /*
     * Finally, assign it to the start array; the work item will
     * collect it and move it to the normal array.
     */
    sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

    ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

    /* this flow continues off the work */
err_unlock_sta:
    spin_unlock_bh(&sta->lock);
    return ret;
}
Пример #11
0
/*
 * hci_h4p_probe - platform probe for the HCI H4P Bluetooth UART driver.
 * @pdev: platform device carrying an omap_bluetooth_config as
 *        platform_data
 *
 * Allocates and initializes the driver state, claims the
 * reset/bt-wakeup/host-wakeup GPIOs described by the platform data,
 * selects the configured UART (IRQ, clocks, base address), requests
 * the UART and host-wakeup interrupts, resets the chip and registers
 * the HCI device.  On failure, resources are released through the
 * fall-through cleanup labels at the bottom.
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_h4p_probe(struct platform_device *pdev)
{
	struct omap_bluetooth_config *bt_config;
	struct hci_h4p_info *info;
	int irq, err;

	dev_info(&pdev->dev, "Registering HCI H4P device\n");
	info = kzalloc(sizeof(struct hci_h4p_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = &pdev->dev;
	info->pm_enabled = 0;
	info->tx_enabled = 1;
	info->rx_enabled = 1;
	info->garbage_bytes = 0;
	info->tx_clocks_en = 0;
	info->rx_clocks_en = 0;
	irq = 0;
	spin_lock_init(&info->lock);
	spin_lock_init(&info->clocks_lock);
	skb_queue_head_init(&info->txq);

	if (pdev->dev.platform_data == NULL) {
		dev_err(&pdev->dev, "Could not get Bluetooth config data\n");
		kfree(info);
		return -ENODATA;
	}

	/* Copy board-specific wiring out of the platform data. */
	bt_config = pdev->dev.platform_data;
	info->chip_type = bt_config->chip_type;
	info->bt_wakeup_gpio = bt_config->bt_wakeup_gpio;
	info->host_wakeup_gpio = bt_config->host_wakeup_gpio;
	info->reset_gpio = bt_config->reset_gpio;
	info->bt_sysclk = bt_config->bt_sysclk;

	NBT_DBG("RESET gpio: %d\n", info->reset_gpio);
	NBT_DBG("BTWU gpio: %d\n", info->bt_wakeup_gpio);
	NBT_DBG("HOSTWU gpio: %d\n", info->host_wakeup_gpio);
	NBT_DBG("Uart: %d\n", bt_config->bt_uart);
	NBT_DBG("sysclk: %d\n", info->bt_sysclk);

	err = gpio_request(info->reset_gpio, "bt_reset");
	if (err < 0) {
		dev_err(&pdev->dev, "Cannot get GPIO line %d\n",
			info->reset_gpio);
		goto cleanup_setup;
	}

	err = gpio_request(info->bt_wakeup_gpio, "bt_wakeup");
	if (err < 0)
	{
		dev_err(info->dev, "Cannot get GPIO line 0x%d",
			info->bt_wakeup_gpio);
		gpio_free(info->reset_gpio);
		goto cleanup_setup;
	}

	err = gpio_request(info->host_wakeup_gpio, "host_wakeup");
	if (err < 0)
	{
		dev_err(info->dev, "Cannot get GPIO line %d",
		       info->host_wakeup_gpio);
		gpio_free(info->reset_gpio);
		gpio_free(info->bt_wakeup_gpio);
		goto cleanup_setup;
	}

	gpio_direction_output(info->reset_gpio, 0);
	gpio_direction_output(info->bt_wakeup_gpio, 0);
	gpio_direction_input(info->host_wakeup_gpio);

	/* Map the configured UART number to its IRQ, clocks and base. */
	switch (bt_config->bt_uart) {
	case 1:
		if (cpu_is_omap16xx()) {
			irq = INT_UART1;
			info->uart_fclk = clk_get(NULL, "uart1_ck");
		} else if (cpu_is_omap24xx()) {
			irq = INT_24XX_UART1_IRQ;
			info->uart_iclk = clk_get(NULL, "uart1_ick");
			info->uart_fclk = clk_get(NULL, "uart1_fck");
		}
		info->uart_base = OMAP2_IO_ADDRESS(OMAP_UART1_BASE);
		break;
	case 2:
		if (cpu_is_omap16xx()) {
			irq = INT_UART2;
			info->uart_fclk = clk_get(NULL, "uart2_ck");
		} else {
			irq = INT_24XX_UART2_IRQ;
			info->uart_iclk = clk_get(NULL, "uart2_ick");
			info->uart_fclk = clk_get(NULL, "uart2_fck");
		}
		info->uart_base = OMAP2_IO_ADDRESS(OMAP_UART2_BASE);
		break;
	case 3:
		if (cpu_is_omap16xx()) {
			irq = INT_UART3;
			info->uart_fclk = clk_get(NULL, "uart3_ck");
		} else {
			irq = INT_24XX_UART3_IRQ;
			info->uart_iclk = clk_get(NULL, "uart3_ick");
			info->uart_fclk = clk_get(NULL, "uart3_fck");
		}
		info->uart_base = OMAP2_IO_ADDRESS(OMAP_UART3_BASE);
		break;
	default:
		dev_err(info->dev, "No uart defined\n");
		/* NOTE(review): err still holds 0 from set-up above this
		 * point only if gpio_request succeeded; here it is >= 0,
		 * so the error path may return a non-negative value —
		 * confirm intended errno. */
		goto cleanup;
	}

	info->irq = irq;
	err = request_irq(irq, hci_h4p_interrupt, IRQF_DISABLED, "hci_h4p",
			  info);
	if (err < 0) {
		dev_err(info->dev, "hci_h4p: unable to get IRQ %d\n", irq);
		goto cleanup;
	}

	/* host_wakeup GPIO doubles as an edge-triggered wake interrupt. */
	err = request_irq(gpio_to_irq(info->host_wakeup_gpio),
			  hci_h4p_wakeup_interrupt,  IRQF_TRIGGER_FALLING |
			  IRQF_TRIGGER_RISING | IRQF_DISABLED,
			  "hci_h4p_wkup", info);
	if (err < 0) {
		dev_err(info->dev, "hci_h4p: unable to get wakeup IRQ %d\n",
			  gpio_to_irq(info->host_wakeup_gpio));
		free_irq(irq, info);
		goto cleanup;
	}

	err = set_irq_wake(gpio_to_irq(info->host_wakeup_gpio), 1);
	if (err < 0) {
		dev_err(info->dev, "hci_h4p: unable to set wakeup for IRQ %d\n",
				gpio_to_irq(info->host_wakeup_gpio));
		free_irq(irq, info);
		free_irq(gpio_to_irq(info->host_wakeup_gpio), info);
		goto cleanup;
	}

	/* Deferrable timer used by hci_h4p_lazy_clock_release(). */
	init_timer_deferrable(&info->lazy_release);
	info->lazy_release.function = hci_h4p_lazy_clock_release;
	info->lazy_release.data = (unsigned long)info;
	hci_h4p_set_clk(info, &info->tx_clocks_en, 1);
	err = hci_h4p_reset_uart(info);
	if (err < 0)
		goto cleanup_irq;
	hci_h4p_init_uart(info);
	hci_h4p_set_rts(info, 0);
	err = hci_h4p_reset(info);
	hci_h4p_reset_uart(info);
	if (err < 0)
		goto cleanup_irq;
	gpio_set_value(info->reset_gpio, 0);
	hci_h4p_set_clk(info, &info->tx_clocks_en, 0);

	platform_set_drvdata(pdev, info);

	if (hci_h4p_register_hdev(info) < 0) {
		dev_err(info->dev, "failed to register hci_h4p hci device\n");
		/* NOTE(review): err is not updated here, so the probe may
		 * return a non-negative value on this failure — verify. */
		goto cleanup_irq;
	}

	return 0;

	/* Cleanup labels intentionally fall through, releasing resources
	 * in reverse acquisition order. */
cleanup_irq:
	free_irq(irq, (void *)info);
	free_irq(gpio_to_irq(info->host_wakeup_gpio), info);
cleanup:
	gpio_set_value(info->reset_gpio, 0);
	gpio_free(info->reset_gpio);
	gpio_free(info->bt_wakeup_gpio);
	gpio_free(info->host_wakeup_gpio);

cleanup_setup:

	kfree(info);
	return err;

}