Example #1
int cx8800_restart_vbi_queue(struct cx8800_dev    *dev,
			     struct cx88_dmaqueue *q)
{
	struct cx88_buffer *buf;

	if (list_empty(&q->active))
		return 0;

	buf = list_entry(q->active.next, struct cx88_buffer, vb.queue);
	dprintk(2,"restart_queue [%p/%d]: restart dma\n",
		buf, buf->vb.i);
	cx8800_start_vbi_dma(dev, q, buf);
	list_for_each_entry(buf, &q->active, vb.queue)
		buf->count = q->count++;
	mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
	return 0;
}
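All of the examples collected here rely on the same primitive: list_for_each_entry(pos, head, member) walks a circular list of struct list_head nodes and, via container_of(), hands back the structure that embeds each node. The sketch below is not taken from any of the listed projects; demo_item, demo_list and demo_sum are hypothetical names, assuming a kernel-style context where <linux/list.h> is available.

#include <linux/list.h>

struct demo_item {
	int value;
	struct list_head node;		/* linkage embedded in the item */
};

static LIST_HEAD(demo_list);		/* head of the list of demo_item */

static int demo_sum(void)
{
	struct demo_item *item;
	int sum = 0;

	/* 'item' points at each containing demo_item in turn;
	 * the list must not be modified while iterating this way */
	list_for_each_entry(item, &demo_list, node)
		sum += item->value;

	return sum;
}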
Example #2
void add_ipc_port_by_id (IDTYPE vmid, PAL_HANDLE hdl, int type,
                         port_fini fini, struct shim_ipc_port ** portptr)
{
    debug("adding port (handle %p) for process %u (type %04x)\n",
          hdl, vmid, type);

    assert(!!hdl && PAL_GET_TYPE(hdl));
    lock(ipc_helper_lock);

    struct hlist_head * head = vmid ? &ipc_port_pool[PID_HASH(vmid)] : NULL;
    struct shim_ipc_port * tmp, * port = NULL;
    struct hlist_node * pos;

    if (vmid)
        hlist_for_each_entry(tmp, pos, head, hlist)
            if (tmp->info.vmid == vmid && tmp->pal_handle == hdl) {
                port = tmp;
                __get_ipc_port(port);
                break;
            }

    if (!port)
        list_for_each_entry(tmp, &pobj_list, list)
            if (tmp->pal_handle == hdl) {
                port = tmp;
                __get_ipc_port(port);
                break;
            }

    if (!port && !(port = __get_new_ipc_port(hdl))) {
        if (portptr)
            *portptr = NULL;
        unlock(ipc_helper_lock);
        return;
    }

    bool need_restart = __add_ipc_port(port, vmid, type, fini);

    if (portptr)
        *portptr = port;
    else
        __put_ipc_port(port);

    unlock(ipc_helper_lock);

    if (need_restart)
        restart_ipc_helper(true);
}
Example #3
static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
{
	struct spi_master *spi_cntrl;
	struct omap2_mcspi_cs *cs;
	spi_cntrl = mcspi->master;

	/* McSPI: context restore */
	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL,
			omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl);

	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE,
			omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable);

	list_for_each_entry(cs, &omap2_mcspi_ctx[spi_cntrl->bus_num - 1].cs,
			node)
		__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}
Example #4
static int do_make_slave(struct mount *mnt)
{
	struct mount *peer_mnt = mnt, *master = mnt->mnt_master;
	struct mount *slave_mnt;

	/*
	 * slave 'mnt' to a peer mount that has the
	 * same root dentry. If none is available then
	 * slave it to anything that is available.
	 */
	while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
	       peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root) ;

	if (peer_mnt == mnt) {
		peer_mnt = next_peer(mnt);
		if (peer_mnt == mnt)
			peer_mnt = NULL;
	}
	if (IS_MNT_SHARED(mnt) && list_empty(&mnt->mnt_share))
		mnt_release_group_id(mnt);

	list_del_init(&mnt->mnt_share);
	mnt->mnt_group_id = 0;

	if (peer_mnt)
		master = peer_mnt;

	if (master) {
		list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
			slave_mnt->mnt_master = master;
		list_move(&mnt->mnt_slave, &master->mnt_slave_list);
		list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
	} else {
		struct list_head *p = &mnt->mnt_slave_list;
		while (!list_empty(p)) {
			slave_mnt = list_first_entry(p,
					struct mount, mnt_slave);
			list_del_init(&slave_mnt->mnt_slave);
			slave_mnt->mnt_master = NULL;
		}
	}
	mnt->mnt_master = master;
	CLEAR_MNT_SHARED(mnt);
	return 0;
}
Example #5
/*
 * Prevents PCI Express ASPM (Active State Power Management) from being enabled.
 *
 * Save the register offset, where the ASPM control bits are located,
 * for each PCI Express device that is in the device list of
 * the root port in an array for fast indexing. Replace the bus ops
 * with the modified one.
 */
static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
{
    int i;
    struct pci_bus  *pbus;
    struct pci_dev *dev;

    if ((pbus = pdev->subordinate) == NULL)
        return;

    /*
     * Check if the DID of pdev matches one of the six root ports. This
     * check is needed in the case this function is called directly by the
     * hot-plug driver.
     */
    if ((pdev->device < PCI_DEVICE_ID_INTEL_MCH_PA) ||
            (pdev->device > PCI_DEVICE_ID_INTEL_MCH_PC1))
        return;

    if (list_empty(&pbus->devices)) {
        /*
         * If no device is attached to the root port at power-up or
         * after hot-remove, the pbus->devices is empty and this code
         * will set the offsets to zero and the bus ops to parent's bus
         * ops, which is unmodified.
         */
        for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
            quirk_aspm_offset[i] = 0;

        pci_bus_set_ops(pbus, pbus->parent->ops);
    } else {
        /*
         * If devices are attached to the root port at power-up or
         * after hot-add, the code loops through the device list of
         * each root port to save the register offsets and replace the
         * bus ops.
         */
        list_for_each_entry(dev, &pbus->devices, bus_list)
            /* There are 0 to 8 devices attached to this bus */
            quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] =
                dev->pcie_cap + PCI_EXP_LNKCTL;

        pci_bus_set_ops(pbus, &quirk_pcie_aspm_ops);
        dev_info(&pbus->dev, "writes to ASPM control bits will be ignored\n");
    }

}
Example #6
int mlx4_register_device(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_interface *intf;

	mutex_lock(&intf_mutex);

	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx4_add_device(intf, priv);

	mutex_unlock(&intf_mutex);
	if (!mlx4_is_slave(dev))
		mlx4_start_catas_poll(dev);

	return 0;
}
Example #7
/*
 *  Called after each bus is probed, but before its children
 *  are examined.
 */
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;

	if (bus->number == 0) {
		bus->resource[0] = &pci_ioport_resource;
		bus->resource[1] = &pci_iomem_resource;
	}

	if (bus->self) {
		pci_read_bridge_bases(bus);
		pcibios_fixup_device_resources(bus->self);
	}

	list_for_each_entry(dev, &bus->devices, bus_list)
		pcibios_fixup_device_resources(dev);
}
Example #8
static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
{
	struct pcie_device *srv = context;
	struct pci_dev *port = srv->port;
	struct pci_dev *dev;

	/*
	 * Print status from downstream devices, not this root port or
	 * downstream switch port.
	 */
	down_read(&pci_bus_sem);
	list_for_each_entry(dev, &port->subordinate->devices, bus_list)
		pcie_report_downtraining(dev);
	up_read(&pci_bus_sem);

	return IRQ_HANDLED;
}
Example #9
void au_plink_list(struct super_block *sb)
{
	struct au_sbinfo *sbinfo;
	struct list_head *plink_list;
	struct pseudo_link *plink;

	SiMustAnyLock(sb);

	sbinfo = au_sbi(sb);
	AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));

	plink_list = &sbinfo->si_plink.head;
	spin_lock(&sbinfo->si_plink.spin);
	list_for_each_entry(plink, plink_list, list)
		AuDbg("%lu\n", plink->inode->i_ino);
	spin_unlock(&sbinfo->si_plink.spin);
}
Example #10
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void radeon_hotplug_work_func(struct work_struct *work)
{
	struct radeon_device *rdev = container_of(work, struct radeon_device,
						  hotplug_work);
	struct drm_device *dev = rdev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	mutex_lock(&mode_config->mutex);
	if (mode_config->num_connector) {
		list_for_each_entry(connector, &mode_config->connector_list, head)
			radeon_connector_hotplug(connector);
	}
	mutex_unlock(&mode_config->mutex);
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
Example #11
int mlx4_register_interface(struct mlx4_interface *intf)
{
	struct mlx4_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);

	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx4_add_device(intf, priv);

	mutex_unlock(&intf_mutex);

	return 0;
}
Example #12
static int soc_compr_free_fe(struct snd_compr_stream *cstream)
{
    struct snd_soc_pcm_runtime *fe = cstream->private_data;
    struct snd_soc_platform *platform = fe->platform;
    struct snd_soc_dpcm *dpcm;
    int stream, ret;

    mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);

    if (cstream->direction == SND_COMPRESS_PLAYBACK)
        stream = SNDRV_PCM_STREAM_PLAYBACK;
    else
        stream = SNDRV_PCM_STREAM_CAPTURE;

    snd_soc_runtime_deactivate(fe, stream);

    fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;

    ret = dpcm_be_dai_hw_free(fe, stream);
    if (ret < 0)
        dev_err(fe->dev, "compressed hw_free failed %d\n", ret);

    ret = dpcm_be_dai_shutdown(fe, stream);

    /* mark FE's links ready to prune */
    list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
        dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;

    dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);

    fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
    fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;

    dpcm_be_disconnect(fe, stream);

    fe->dpcm[stream].runtime = NULL;

    if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown)
        fe->dai_link->compr_ops->shutdown(cstream);

    if (platform->driver->compr_ops && platform->driver->compr_ops->free)
        platform->driver->compr_ops->free(cstream);

    mutex_unlock(&fe->card->mutex);
    return 0;
}
Example #13
/**
 *	cdv_save_display_registers	-	save registers lost on suspend
 *	@dev: our DRM device
 *
 *	Save the state we need in order to be able to restore the interface
 *	upon resume from suspend
 */
static int cdv_save_display_registers(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_save_area *regs = &dev_priv->regs;
	struct drm_connector *connector;

	dev_dbg(dev->dev, "Saving GPU registers.\n");

	pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB);

	regs->cdv.saveDSPCLK_GATE_D = REG_READ(DSPCLK_GATE_D);
	regs->cdv.saveRAMCLK_GATE_D = REG_READ(RAMCLK_GATE_D);

	regs->cdv.saveDSPARB = REG_READ(DSPARB);
	regs->cdv.saveDSPFW[0] = REG_READ(DSPFW1);
	regs->cdv.saveDSPFW[1] = REG_READ(DSPFW2);
	regs->cdv.saveDSPFW[2] = REG_READ(DSPFW3);
	regs->cdv.saveDSPFW[3] = REG_READ(DSPFW4);
	regs->cdv.saveDSPFW[4] = REG_READ(DSPFW5);
	regs->cdv.saveDSPFW[5] = REG_READ(DSPFW6);

	regs->cdv.saveADPA = REG_READ(ADPA);

	regs->cdv.savePP_CONTROL = REG_READ(PP_CONTROL);
	regs->cdv.savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
	regs->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
	regs->saveBLC_PWM_CTL2 = REG_READ(BLC_PWM_CTL2);
	regs->cdv.saveLVDS = REG_READ(LVDS);

	regs->cdv.savePFIT_CONTROL = REG_READ(PFIT_CONTROL);

	regs->cdv.savePP_ON_DELAYS = REG_READ(PP_ON_DELAYS);
	regs->cdv.savePP_OFF_DELAYS = REG_READ(PP_OFF_DELAYS);
	regs->cdv.savePP_CYCLE = REG_READ(PP_CYCLE);

	regs->cdv.saveVGACNTRL = REG_READ(VGACNTRL);

	regs->cdv.saveIER = REG_READ(PSB_INT_ENABLE_R);
	regs->cdv.saveIMR = REG_READ(PSB_INT_MASK_R);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
		connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);

	return 0;
}
Example #14
static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr,
				     struct pt_regs *regs)
{
	int ret = 0;
	struct brcmstb_gisb_arb_device *gdev;

	/* iterate over each GISB arb registered handlers */
	list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next)
		ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error");
	/*
	 * If it was an imprecise abort, then we need to correct the
	 * return address to be _after_ the instruction.
	 */
	if (fsr & (1 << 10))
		regs->ARM_pc += 4;

	return ret;
}
Example #15
static int initialize_workers(struct conn_data *data)
{
	INIT_LIST_HEAD(&data->workers);

	for (int i = 0; i < NUM_WORKERS; i++) {
		if (start_new_worker(data))
			goto err;
	}

	return 0;

	struct conn_worker_list *w, *tmp;
err:
	/* use the _safe variant: each node is freed while walking the list,
	 * so the next pointer must be fetched before free(w) runs */
	list_for_each_entry_safe(w, tmp, &data->workers, list)
		free(w);

	return -1;
}       
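Example #15 above frees each worker inside the traversal, which is the one thing plain list_for_each_entry() cannot tolerate: the macro follows pos->member.next after the loop body, so the current node must still be valid. list_for_each_entry_safe() stashes the next entry before the body runs, which is why the error path is written with the _safe variant. A minimal user-space sketch of the same idea follows; it assumes a user-space port of the kernel list macros, and the conn_worker_list layout shown is a guess for illustration, not the project's actual definition.

#include <stdlib.h>
#include "list.h"			/* assumed user-space port of the kernel list macros */

struct conn_worker_list {
	struct list_head list;		/* guessed layout, for illustration only */
	/* ... per-worker state ... */
};

static void free_all_workers(struct list_head *workers)
{
	struct conn_worker_list *w, *tmp;

	/* 'tmp' already holds the next entry before free(w) runs,
	 * so the traversal never touches freed memory */
	list_for_each_entry_safe(w, tmp, workers, list)
		free(w);

	INIT_LIST_HEAD(workers);	/* leave the head empty and reusable */
}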
Example #16
static void histo_stop_streaming(struct vb2_queue *vq)
{
	struct vsp1_histogram *histo = vb2_get_drv_priv(vq);
	struct vsp1_histogram_buffer *buffer;
	unsigned long flags;

	spin_lock_irqsave(&histo->irqlock, flags);

	/* Remove all buffers from the IRQ queue. */
	list_for_each_entry(buffer, &histo->irqqueue, queue)
		vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
	INIT_LIST_HEAD(&histo->irqqueue);

	/* Wait for the buffer being read out (if any) to complete. */
	wait_event_lock_irq(histo->wait_queue, !histo->readout, histo->irqlock);

	spin_unlock_irqrestore(&histo->irqlock, flags);
}
Example #17
static int exynos_drm_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *drm_dev = node->minor->dev;
	struct exynos_drm_gem_info_data gem_info_data;

	gem_info_data.m = m;

	seq_printf(gem_info_data.m, "pid \ttgid \thandle \trefcount \thcount "
				"\tsize \t\tflags \tpage_size \tpfnmap \t"
				"export_to_fd \timport_from_fd\n");

	list_for_each_entry(gem_info_data.filp, &drm_dev->filelist, lhead)
		idr_for_each(&gem_info_data.filp->object_idr,
				exynos_drm_gem_one_info, &gem_info_data);

	return 0;
}
Example #18
void intel_fb_restore_mode(struct drm_device *dev)
{
	int ret;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;

	rw_enter_write(&dev->mode_config.rwl);

	ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
	if (ret)
		DRM_DEBUG("failed to restore crtc mode\n");

	/* Be sure to shut off any planes that may be active */
	list_for_each_entry(plane, &config->plane_list, head)
		plane->funcs->disable_plane(plane);

	rw_exit_write(&dev->mode_config.rwl);
}
Example #19
static size_t record_to_str(module_t *mod) {
    login_user_t *entry = NULL;
    size_t ret = 0;

    if (list_empty(&mod->record_list))
        return -1;

    list_for_each_entry(entry, &mod->record_list, list)
        ret += snprintf(
                    mod->record + ret,
                    LEN_1M,
                    "%s=%s,%s,%s,%ld;\n",
                    name,
                    entry->user,
                    entry->tty,
                    entry->from,
                    entry->login_time);
    return ret;
}
Example #20
int add_mtd_device(struct mtd_info *mtd)
{
	int i;

	BUG_ON(mtd->writesize == 0);
	mutex_lock(&mtd_table_mutex);

	for (i=0; i < MAX_MTD_DEVICES; i++)
		if (!mtd_table[i]) {
			struct mtd_notifier *not;

			mtd_table[i] = mtd;
			mtd->index = i;
			mtd->usecount = 0;

			/* Some chips always power up locked. Unlock them now */
			if ((mtd->flags & MTD_WRITEABLE)
			    && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
				if (mtd->unlock(mtd, 0, mtd->size))
					printk(KERN_WARNING
					       "%s: unlock failed, "
					       "writes may not work\n",
					       mtd->name);
			}

			DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
			/* No need to get a refcount on the module containing
			   the notifier, since we hold the mtd_table_mutex */
			list_for_each_entry(not, &mtd_notifiers, list)
				not->add(mtd);

			mutex_unlock(&mtd_table_mutex);
			/* We _know_ we aren't being removed, because
			   our caller is still holding us here. So none
			   of this try_ nonsense, and no bitching about it
			   either. :) */
			__module_get(THIS_MODULE);
			return 0;
		}

	mutex_unlock(&mtd_table_mutex);
	return 1;
}
Example #21
void intel_fb_restore_mode(struct drm_device *dev)
{
    int ret;
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_mode_config *config = &dev->mode_config;
    struct drm_plane *plane;

    mutex_lock(&dev->mode_config.mutex);

    ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
    if (ret)
        DRM_DEBUG("failed to restore crtc mode\n");

    /* Be sure to shut off any planes that may be active */
    list_for_each_entry(plane, &config->plane_list, head)
        plane->funcs->disable_plane(plane);

    mutex_unlock(&dev->mode_config.mutex);
}
Example #22
static void restart_level_changed(void)
{
	struct subsys_data *subsys;

	if (cpu_is_msm8x60() && restart_level == RESET_SUBSYS_COUPLED) {
		restart_orders = orders_8x60_all;
		n_restart_orders = ARRAY_SIZE(orders_8x60_all);
	}

	if (cpu_is_msm8x60() && restart_level == RESET_SUBSYS_MIXED) {
		restart_orders = orders_8x60_modems;
		n_restart_orders = ARRAY_SIZE(orders_8x60_modems);
	}

	mutex_lock(&subsystem_list_lock);
	list_for_each_entry(subsys, &subsystem_list, list)
		subsys->restart_order = _update_restart_order(subsys);
	mutex_unlock(&subsystem_list_lock);
}
Example #23
void input_report_key_event(struct input_device *idev, unsigned int code, int value)
{
	struct input_event event;
	struct input_notifier *in;

	if (code > KEY_MAX)
		return;

	if (value)
		set_bit(code, &idev->keys);
	else
		clear_bit(code, &idev->keys);

	event.code = code;
	event.value = value;

	list_for_each_entry(in, &input_consumers, list)
		in->notify(in, &event);
}
Example #24
/*
 * Unregister a cec device node
 *
 * This unregisters the passed device. Future open calls will be met with
 * errors.
 *
 * This function can safely be called if the device node has never been
 * registered or has already been unregistered.
 */
static void cec_devnode_unregister(struct cec_devnode *devnode)
{
	struct cec_fh *fh;

	/* Check if devnode was never registered or already unregistered */
	if (!devnode->registered || devnode->unregistered)
		return;

	mutex_lock(&devnode->fhs_lock);
	list_for_each_entry(fh, &devnode->fhs, list)
		wake_up_interruptible(&fh->wait);
	mutex_unlock(&devnode->fhs_lock);

	devnode->registered = false;
	devnode->unregistered = true;
	device_del(&devnode->dev);
	cdev_del(&devnode->cdev);
	put_device(&devnode->dev);
}
Example #25
File: mod_ip.c Project: fifilyu/zsr
static size_t record_to_str(module_t *mod) {
    interface_t *entry = NULL;
    size_t ret = 0;

    if (list_empty(&mod->record_list))
        return -1;

    list_for_each_entry(entry, &mod->record_list, list)
        ret += sprintf(
                    mod->record + ret,
                    "%s=%s,%s,%s,%d;\n",
                    name,
                    entry->dev,
                    entry->ip_addr,
                    entry->netmask,
                    entry->ip_ver);

    return ret;
}
Example #26
/**
 * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
 * @adapter: The zfcp adapter to unregister.
 */
void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct Scsi_Host *shost;
	struct zfcp_port *port;

	shost = adapter->scsi_host;
	if (!shost)
		return;

	read_lock_irq(&adapter->port_list_lock);
	list_for_each_entry(port, &adapter->port_list, list)
		port->rport = NULL;
	read_unlock_irq(&adapter->port_list_lock);

	fc_remove_host(shost);
	scsi_remove_host(shost);
	scsi_host_put(shost);
	adapter->scsi_host = NULL;
}
Example #27
/*
 * power management
 */
static int hda_tegra_suspend(struct device *dev)
{
	struct snd_card *card = dev_get_drvdata(dev);
	struct azx *chip = card->private_data;
	struct azx_pcm *p;
	struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);

	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
	list_for_each_entry(p, &chip->pcm_list, list)
		snd_pcm_suspend_all(p->pcm);
	if (chip->initialized)
		snd_hda_suspend(chip->bus);

	azx_stop_chip(chip);
	azx_enter_link_reset(chip);
	hda_tegra_disable_clocks(hda);

	return 0;
}
Example #28
static int
client_packet(const void *data, size_t length, void *ctx)
{
    struct connection *c = ctx;
    packet_action_t action;
    struct linked_server *ls;

    assert(c->client.client != NULL);

    action = handle_packet_from_server(server_packet_bindings,
                                       c, data, length);
    switch (action) {
    case PA_ACCEPT:
        if (!c->client.reconnecting)
            list_for_each_entry(ls, &c->servers, siblings)
                if (!ls->attaching && !ls->is_zombie)
                    uo_server_send(ls->server, data, length);
        break;

    case PA_DROP:
        break;

    case PA_DISCONNECT:
        log(2, "aborting connection to server after packet 0x%x\n",
            *(const unsigned char*)data);
        log_hexdump(6, data, length);

        if (c->autoreconnect && c->in_game) {
            log(2, "auto-reconnecting\n");
            connection_disconnect(c);
            connection_reconnect_delayed(c);
        } else {
            connection_delete(c);
        }
        return -1;

    case PA_DELETED:
        return -1;
    }

    return 0;
}
Example #29
static int vp_in_stop_streaming(struct vb2_queue *vq)
{
	struct vcap_client_data *c_data = vb2_get_drv_priv(vq);
	struct vb2_buffer *vb;

	dprintk(2, "VP stop streaming\n");

	while (!list_empty(&c_data->vid_vp_action.in_active)) {
		struct vcap_buffer *buf;
		buf = list_entry(c_data->vid_vp_action.in_active.next,
			struct vcap_buffer, list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}

	/* clean ion handles */
	list_for_each_entry(vb, &vq->queued_list, queued_entry)
		free_ion_handle_work(c_data->dev, vb);
	return 0;
}
Example #30
void isert_portal_release(struct isert_portal *portal)
{
	struct isert_connection *conn;

	pr_info("iser portal cm_id:%p releasing\n", portal->cm_id);

	if (portal->cm_id) {
		rdma_destroy_id(portal->cm_id);
		portal->cm_id = NULL;
	}

	isert_portal_list_remove(portal);

	mutex_lock(&dev_list_mutex);
	list_for_each_entry(conn, &portal->conn_list, portal_node)
		isert_conn_disconnect(conn);
	portal->state = ISERT_PORTAL_INACTIVE;
	isert_portal_free(portal);
	mutex_unlock(&dev_list_mutex);
}