static int __init pn547_dev_init(void)
{
    pr_info("Loading PN547 driver\n");
    async_schedule(async_dev_init, NULL);

    return 0;
}
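
The deferred body behind a stub like this has to match the kernel's async callback signature, void (*)(void *data, async_cookie_t cookie). A hypothetical sketch of async_dev_init (the pn547_driver struct it registers is an assumption, not shown in the snippet):

static void async_dev_init(void *data, async_cookie_t cookie)
{
	/* the slow registration runs off the module-init path */
	if (i2c_add_driver(&pn547_driver))	/* assumed i2c driver struct */
		pr_err("pn547: i2c driver registration failed\n");
}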
static int mitigate_inrush_notifier_cb(struct notifier_block *nb,
				unsigned long code, void *ss_handle)
{
	struct subsystem *subsys = notifier_to_subsystem(nb);
	struct inrush_driver_data *drv_data = subsys->drv_data;

	if (subsys->booted)
		return NOTIFY_DONE;

	switch (code) {
	case SUBSYS_AFTER_POWERUP:
		pr_info("%s: subsystem %s has completed powerup\n", __func__,
							subsys->name);
		subsys->booted = true;
		drv_data->subsys_boot_count++;
		break;
	default:
		return NOTIFY_DONE;
	}

	/*
	 * Once every subsystem is up, this driver's job is done, so
	 * free its resources.
	 */
	if (drv_data->subsys_count == drv_data->subsys_boot_count)
		async_schedule(free_resources, drv_data);

	return NOTIFY_DONE;
}
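
Freeing the driver data directly from the notifier callback would be unsafe while the notifier chain is still being walked, which is presumably why the teardown is pushed through async_schedule. A minimal sketch of the deferred half, assuming a subsys array, a notif_handle field, and the msm subsys_notif_unregister_notifier() call (all names beyond the snippet are assumptions):

static void free_resources(void *data, async_cookie_t cookie)
{
	struct inrush_driver_data *drv_data = data;
	int i;

	/* all subsystems are booted; drop every notifier we registered */
	for (i = 0; i < drv_data->subsys_count; i++)
		subsys_notif_unregister_notifier(
				drv_data->subsys[i].notif_handle,
				&drv_data->subsys[i].nb);
	kfree(drv_data);
}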
static int ufs_test_run_multi_query_test(struct test_data *td)
{
	int i;
	struct scsi_device *sdev;
	struct ufs_hba *hba;

	BUG_ON(!td || !td->req_q || !td->req_q->queuedata);
	sdev = (struct scsi_device *)td->req_q->queuedata;
	BUG_ON(!sdev->host);
	hba = shost_priv(sdev->host);
	BUG_ON(!hba);

	atomic_set(&utd->outstanding_threads, 0);
	utd->fail_threads = 0;
	init_completion(&utd->outstanding_complete);
	for (i = 0; i < MAX_PARALLEL_QUERIES; ++i) {
		atomic_inc(&utd->outstanding_threads);
		async_schedule(ufs_test_random_async_query, hba);
	}

	if (!wait_for_completion_timeout(&utd->outstanding_complete,
			THREADS_COMPLETION_TIMOUT)) {
		pr_err("%s: Multi-query test timed-out %d threads left",
			__func__, atomic_read(&utd->outstanding_threads));
	}
	test_iosched_mark_test_completion();
	return 0;
}
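
The test above relies on each scheduled worker to drop outstanding_threads and signal the completion; utd here is assumed to be file-scope test data in the original driver. A hedged sketch of the worker's accounting (the query-issuing helper is hypothetical):

static void ufs_test_random_async_query(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = data;

	/* issue one random UFS query request; helper name is assumed */
	if (ufs_test_issue_random_query(hba))
		utd->fail_threads++;

	/* last worker out wakes the waiter in the test function */
	if (atomic_dec_and_test(&utd->outstanding_threads))
		complete(&utd->outstanding_complete);
}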
static void ufs_test_run_synchronous_scenario(struct test_scenario *read_data)
{
	init_completion(&utd->outstanding_complete);
	atomic_set(&utd->outstanding_threads, 1);
	async_schedule(ufs_test_run_scenario, read_data);
	if (!wait_for_completion_timeout(&utd->outstanding_complete,
			THREADS_COMPLETION_TIMOUT)) {
		pr_err("%s: Multi-thread test timed-out %d threads left",
			__func__, atomic_read(&utd->outstanding_threads));
	}
}
static int ufs_test_run_parallel_read_and_write_test(
	struct test_iosched *test_iosched)
{
	struct test_scenario *read_data, *write_data;
	int i;
	bool changed_seed = false;
	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;

	read_data = get_scenario(test_iosched, SCEN_RANDOM_READ_50);
	write_data = get_scenario(test_iosched, SCEN_RANDOM_WRITE_50);

	/* ensure randomness even if the user forgot to set a seed */
	if (utd->random_test_seed <= 0) {
		changed_seed = true;
		utd->random_test_seed = 1;
	}

	atomic_set(&utd->outstanding_threads, 0);
	utd->fail_threads = 0;
	init_completion(&utd->outstanding_complete);

	for (i = 0; i < (RANDOM_REQUEST_THREADS / 2); i++) {
		/* Account for both workers before either can run and
		 * decrement the counter, so the completion cannot misfire. */
		atomic_add(2, &utd->outstanding_threads);
		async_schedule(ufs_test_run_scenario, read_data);
		async_schedule(ufs_test_run_scenario, write_data);
	}

	if (!wait_for_completion_timeout(&utd->outstanding_complete,
				THREADS_COMPLETION_TIMOUT)) {
		pr_err("%s: Multi-thread test timed-out %d threads left",
			__func__, atomic_read(&utd->outstanding_threads));
	}
	check_test_completion(test_iosched);

	/* clear random seed if changed */
	if (changed_seed)
		utd->random_test_seed = 0;

	return 0;
}
Example #6
ssize_t
argos_net_send_packet(struct argos_net_conn *conn, const struct pcap_pkthdr *h,
    const u_char *sp, uint8_t channel)
{
    if (conn->state == ARGOS_NET_CONN_DEAD) {
        orion_log_err("unable to send on DEAD network handle");
        errno = EBADF;
        return -1;
    }

    assert(conn->state != ARGOS_NET_CONN_IDLE);

    if (conn->shutdown) {
        /* illegal to try to send after argos_net_shutdown() is called */
        errno = EPIPE;
        return -1;
    }

    struct argos_net_pcap_msg msg;
    size_t reqlen = sizeof(msg) + h->caplen;

    msg.msgtype = htons(ARGOS_NET_PCAP_MSGTYPE);
    msg.msglen = htonl(reqlen);
    msg.channel = channel;
    msg.ts_sec = htonl(h->ts.tv_sec);
    msg.ts_usec = htonl(h->ts.tv_usec);
    msg.pktlen = htonl(h->len);
    msg.caplen = htonl(h->caplen);

    if (buffer_remaining(conn->pktbuf) < reqlen) {
        errno = ENOBUFS;
        return -1;
    }

    int rv = buffer_write(conn->pktbuf, &msg, sizeof(msg));
    assert(rv >= 0);

    rv = buffer_write(conn->pktbuf, sp, h->caplen);
    assert(rv >= 0);

    if (conn->compress_evt_reg == NULL) {
        static const struct timeval timeout = COMPRESS_DELAY_MAX;
        conn->compress_evt_reg = async_schedule(&timeout, compression_timeout,
            conn, 0);
    }

    return reqlen;
}
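
Unlike the kernel snippets on this page, this async_schedule() is a userspace event-loop call: it takes a delay, a callback, a user argument, and flags, and returns a registration handle stored in conn->compress_evt_reg so that only one timeout is pending at a time. A hedged sketch of the callback, assuming a void (*)(void *) convention (the flush helper is hypothetical):

static void compression_timeout(void *arg)
{
    struct argos_net_conn *conn = arg;

    /* the one-shot registration has fired; allow a new one */
    conn->compress_evt_reg = NULL;

    /* hypothetical helper: compress pktbuf and queue it to the socket */
    argos_net_flush_pktbuf(conn);
}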
static int __init anx7816_init(void)
{
#if 0
    async_schedule(anx7816_init_async, NULL);
    return 0;
#else
    int ret = 0;
    pr_info("anx7816_init\n");
    ret = i2c_add_driver(&anx7816_driver);
    if (ret < 0)
        pr_err("%s: failed to register anx7816 i2c driver\n", __func__);

    return ret;
#endif
}
Example #8
static void _invoke(const char* name, uint id, const char* msg) {
	async_enter_cs(invocation_cs);
	Invocation* inv = mempool_alloc(&invocation_pool);
	async_leave_cs(invocation_cs);

	inv->id = id;
	if(msg) {
		assert(strlen(msg)+1 < 32);
		strcpy(inv->msg, msg);
	}
	else {
		inv->msg[0] = '\0';
	}
	inv->method = name;

	async_schedule(_invoke_task, 0, (void*)inv);
}
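
The scheduled task presumably performs the actual dispatch and returns the Invocation to its pool; a minimal sketch under that assumption, taking the void* userdata this codebase passes to async_schedule (_dispatch and mempool_free are hypothetical names):

static void _invoke_task(void* userdata) {
	Invocation* inv = userdata;

	/* hypothetical dispatch of the queued call */
	_dispatch(inv->method, inv->id, inv->msg);

	/* return the slot to the pool under the same critical section
	 * used when it was allocated */
	async_enter_cs(invocation_cs);
	mempool_free(&invocation_pool, inv);
	async_leave_cs(invocation_cs);
}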
Example #9
static int ml_aitvaras_close(lua_State* l) {
	checkargs(0, "aitvaras.close");

	lua_getglobal(l, "aitvaras");

	const char* lobby_addr = _getstr(l, "lobby_addr");
	const char* server_addr = _getstr(l, "server_addr");
	char* remove_req = alloca(strlen(lobby_addr) + strlen("/remove") + 1);
	strcpy(remove_req, lobby_addr);
	strcat(remove_req, "/remove");

	if(server_id != -1)
		http_post(remove_req, false, server_addr, NULL, _remove_cb);

	mg_stop(mg_ctx);

	aatree_free(&clients);

	// Since some invocations might still be live, append
	// cleanup task to the end of the queue
	async_schedule(_cleanup_invocations, 0, NULL);

	return 0;
}
static int __init hall_sensor_init(void)
{
	async_schedule(hall_sensor_init_async, NULL);
	return 0;
}
static int __init htc_headset_1wire_init(void)
{
    async_schedule(htc_headset_1wire_init_async, NULL);
    return 0;
}
Example #12
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_power_domains_init_hw(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_gem_stolen;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We should ideally scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	async_schedule(intel_fbdev_initial_config, dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}
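
The function handed to async_schedule() here is again a kernel async callback; in i915 of this vintage intel_fbdev_initial_config is roughly a thin wrapper of the following shape (a sketch, not verbatim source):

void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_fbdev *ifbdev = dev_priv->fbdev;

	/* the actual connector scan and initial fbdev mode set */
	drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp);
}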
static int __init anx7808_init(void)
{
	async_schedule(anx7808_init_async, NULL);
	return 0;
}
static int __init ov2722_init_module(void)
{
    async_schedule(ov2722_init_module_async, NULL);
    return 0;
}
static int __init mdm_modem_init(void)
{
	async_schedule(mdm_modem_init_async, NULL);
	return 0;
}
static int __init imx214_800m_fov87_init_module(void)
{

	async_schedule(imx214_800m_fov87_init_module_async, NULL);
	return 0;
}
Example #17
static int __init nas_init(void)
{
	async_schedule(nas_init_async, NULL);
	return 0;
}
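
A counterpart that all of these async-deferring init stubs imply but none of the snippets show: the module exit path must wait for the deferred work before unregistering, or the async callback can run after the module is gone. A minimal sketch for the last example, with nas_exit and nas_driver as assumed names:

static void __exit nas_exit(void)
{
	/* wait for any async_schedule()'d init work still in flight */
	async_synchronize_full();
	platform_driver_unregister(&nas_driver);	/* assumed driver struct */
}
module_exit(nas_exit);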