Example 1
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
    return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
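
The helper above is the whole read side of the BookE pending-interrupt bitmap: each interrupt priority is one bit in vcpu->arch.pending_exceptions, queued with set_bit() and polled with test_bit(). A minimal userspace approximation of that protocol follows; the kernel's bitops in <linux/bitops.h> are atomic, these illustrative stand-ins are not, and the priority value is made up.

#include <stdio.h>

#define BIT_WORD(nr)	((nr) / (8 * sizeof(unsigned long)))
#define BIT_MASK(nr)	(1UL << ((nr) % (8 * sizeof(unsigned long))))

static void set_bit(int nr, unsigned long *addr)
{
	addr[BIT_WORD(nr)] |= BIT_MASK(nr);
}

static void clear_bit(int nr, unsigned long *addr)
{
	addr[BIT_WORD(nr)] &= ~BIT_MASK(nr);
}

static int test_bit(int nr, const unsigned long *addr)
{
	return (addr[BIT_WORD(nr)] & BIT_MASK(nr)) != 0;
}

enum { IRQPRIO_DECREMENTER = 6 };	/* illustrative value, not the real one */

int main(void)
{
	unsigned long pending_exceptions = 0;

	set_bit(IRQPRIO_DECREMENTER, &pending_exceptions);	/* queue the irq */
	printf("pending: %d\n", test_bit(IRQPRIO_DECREMENTER, &pending_exceptions));
	clear_bit(IRQPRIO_DECREMENTER, &pending_exceptions);	/* deliver it */
	printf("pending: %d\n", test_bit(IRQPRIO_DECREMENTER, &pending_exceptions));
	return 0;
}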
Example 2
/*
 * functions called from protocol stack drivers
 * to be EXPORT-ed
 */
long st_register(struct st_proto_s *new_proto)
{
	struct st_data_s	*st_gdata;
	long err = 0;
	unsigned long flags = 0;

	st_kim_ref(&st_gdata, 0);
	if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
	    || new_proto->reg_complete_cb == NULL) {
		pr_err("gdata/new_proto/recv or reg_complete_cb not ready");
		return -EINVAL;
	}
	pr_info("%s(%d) ", __func__, new_proto->chnl_id);

	if (new_proto->chnl_id >= ST_MAX_CHANNELS) {
		pr_err("chnl_id %d not supported", new_proto->chnl_id);
		return -EPROTONOSUPPORT;
	}

	if (st_gdata->is_registered[new_proto->chnl_id] == true) {
		pr_err("chnl_id %d already registered", new_proto->chnl_id);
		return -EALREADY;
	}

	/* can be from process context only */
	spin_lock_irqsave(&st_gdata->lock, flags);

	if (test_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state)) {
		pr_info(" ST_REG_IN_PROGRESS:%d ", new_proto->chnl_id);
		/* fw download in progress */

		add_channel_to_table(st_gdata, new_proto);
		st_gdata->protos_registered++;
		new_proto->write = st_write;

		set_bit(ST_REG_PENDING, &st_gdata->st_state);
		spin_unlock_irqrestore(&st_gdata->lock, flags);
		return -EINPROGRESS;
	} else if (st_gdata->protos_registered == ST_EMPTY) {
		pr_info(" chnl_id list empty :%d ", new_proto->chnl_id);
		set_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
		st_recv = st_kim_recv;

		/* release lock previously held - re-locked below */
		spin_unlock_irqrestore(&st_gdata->lock, flags);

		/* enable the ST LL - to set default chip state */
		st_ll_enable(st_gdata);
		/* this may take a while to complete
		 * since it involves BT fw download
		 */
		err = st_kim_start(st_gdata->kim_data);
		if (err != 0) {
			clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
			if ((st_gdata->protos_registered != ST_EMPTY) &&
			    (test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
				pr_err(" KIM failure complete callback ");
				st_reg_complete(st_gdata, err);
			}
			return -EINVAL;
		}

		clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
		st_recv = st_int_recv;

		/* this is where all pending registration
		 * are signalled to be complete by calling callback functions
		 */
		if ((st_gdata->protos_registered != ST_EMPTY) &&
		    (test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
			pr_debug(" call reg complete callback ");
			st_reg_complete(st_gdata, 0);
		}
		clear_bit(ST_REG_PENDING, &st_gdata->st_state);

		/* check for already registered once more,
		 * since the above check is old
		 */
		if (st_gdata->is_registered[new_proto->chnl_id] == true) {
			pr_err(" proto %d already registered ",
				   new_proto->chnl_id);
			return -EALREADY;
		}

		spin_lock_irqsave(&st_gdata->lock, flags);
		add_channel_to_table(st_gdata, new_proto);
		st_gdata->protos_registered++;
		new_proto->write = st_write;
		spin_unlock_irqrestore(&st_gdata->lock, flags);
		return err;
	}
	/* if fw is already downloaded & new stack registers protocol */
	else {
		add_channel_to_table(st_gdata, new_proto);
		st_gdata->protos_registered++;
		new_proto->write = st_write;

		/* lock already held before entering else */
		spin_unlock_irqrestore(&st_gdata->lock, flags);
		return err;
	}
	pr_debug("done %s(%d) ", __func__, new_proto->chnl_id);
}
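
For context, a sketch of how a protocol stack driver might use the registration path above. Only the fields st_register() actually dereferences (chnl_id, recv, reg_complete_cb, write) are shown; the callback signatures and the channel number are assumptions inferred from that usage, not the authoritative <linux/ti_wilink_st.h> declarations.

/* hedged sketch: callback signatures assumed from the usage above */
static void my_reg_complete_cb(void *priv_data, int data)
{
	/* invoked by ST once a pending registration (the -EINPROGRESS
	 * case, firmware download still running) finally completes */
}

static long my_recv(void *priv_data, struct sk_buff *skb)
{
	/* consume one frame demultiplexed onto this channel */
	return 0;
}

static int my_proto_init(void)
{
	static struct st_proto_s proto = {
		.chnl_id = 4,	/* illustrative channel number */
		.recv = my_recv,
		.reg_complete_cb = my_reg_complete_cb,
	};
	long err = st_register(&proto);

	if (err == -EINPROGRESS)
		return 0;	/* registration completes via the callback */
	if (err)
		return err;
	/* synchronous success: ST filled in proto.write for sending */
	return 0;
}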
Example 3
int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv *itv = s->itv;
	int captype = 0, subtype = 0;
	int enable_passthrough = 0;

	if (s->vdev == NULL)
		return -EINVAL;

	IVTV_DEBUG_INFO("Start encoder stream %s\n", s->name);

	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		captype = 0;
		subtype = 3;

		/* stop passthrough while the encoder starts; re-enabled below */
		if (itv->output_mode == OUT_PASSTHROUGH) {
			ivtv_passthrough_mode(itv, 0);
			enable_passthrough = 1;
		}
		itv->mpg_data_received = itv->vbi_data_inserted = 0;
		itv->dualwatch_jiffies = jiffies;
		itv->dualwatch_stereo_mode = v4l2_ctrl_g_ctrl(itv->cxhdl.audio_mode);
		itv->search_pack_header = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		if (itv->output_mode == OUT_PASSTHROUGH) {
			captype = 2;
			subtype = 11;	/* passthrough capture subtype */
			break;
		}
		captype = 1;
		subtype = 1;
		break;
	case IVTV_ENC_STREAM_TYPE_PCM:
		captype = 1;
		subtype = 2;
		break;
	case IVTV_ENC_STREAM_TYPE_VBI:
		captype = 1;
		subtype = 4;

		itv->vbi.frame = 0;
		itv->vbi.inserted_frame = 0;
		memset(itv->vbi.sliced_mpeg_size,
			0, sizeof(itv->vbi.sliced_mpeg_size));
		break;
	default:
		return -EINVAL;
	}
	s->subtype = subtype;
	s->buffers_stolen = 0;

	
	/* clear the streamoff flag in case it was left over from a previous capture */
	clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags);

	if (atomic_read(&itv->capturing) == 0) {
		int digitizer;

		
		
		/* use frame-based mode: one frame per DMA block */
		ivtv_vapi(itv, CX2341X_ENC_SET_DMA_BLOCK_SIZE, 2, 1, 1);

		
		/* miscellaneous encoder setup */
		ivtv_vapi(itv, CX2341X_ENC_SET_VERT_CROP_LINE, 1, 0);
		ivtv_vapi(itv, CX2341X_ENC_MISC, 2, 3, !itv->has_cx23415);
		ivtv_vapi(itv, CX2341X_ENC_MISC, 2, 8, 0);
		ivtv_vapi(itv, CX2341X_ENC_MISC, 2, 4, 1);
		ivtv_vapi(itv, CX2341X_ENC_MISC, 1, 12);

		
		/* zero out the twelve placeholder parameters */
		ivtv_vapi(itv, CX2341X_ENC_SET_PLACEHOLDER, 12,
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);

		/* digitizer-dependent VSYNC line count */
		if (itv->card->hw_all & (IVTV_HW_SAA7115 | IVTV_HW_SAA717X))
			digitizer = 0xF1;
		else if (itv->card->hw_all & IVTV_HW_SAA7114)
			digitizer = 0xEF;
		else
			digitizer = 0x140;

		ivtv_vapi(itv, CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, digitizer, digitizer);

		
		/* set up VBI capture */
		if (itv->v4l2_cap & V4L2_CAP_VBI_CAPTURE) {
			ivtv_vbi_setup(itv);
		}

		
		/* program index info: mask 7 selects I/P/B frames, 400 entries max */
		ivtv_vapi_result(itv, data, CX2341X_ENC_SET_PGM_INDEX_INFO, 2, 7, 400);
		itv->pgm_info_offset = data[0];
		itv->pgm_info_num = data[1];
		itv->pgm_info_write_idx = 0;
		itv->pgm_info_read_idx = 0;

		IVTV_DEBUG_INFO("PGM Index at 0x%08x with %d elements\n",
				itv->pgm_info_offset, itv->pgm_info_num);

		
		/* apply the current encoder control settings */
		cx2341x_handler_setup(&itv->cxhdl);

		
		/* mute video when capturing radio */
		if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags))
			ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
				1 | (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
	}

	
	if (itv->has_cx23415 && !test_and_set_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
		/* enable the digitizer-reset (VIM) event notification */
		ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 1, IVTV_IRQ_ENC_VIM_RST, -1);
		ivtv_clear_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
	}

	if (atomic_read(&itv->capturing) == 0) {
		
		/* mask capture interrupts until the encoder is running */
		ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);

		clear_bit(IVTV_F_I_EOS, &itv->i_flags);

		cx2341x_handler_set_busy(&itv->cxhdl, 1);

		/* initialize the digitizer for capture: keep the audio clocks
		 * running (else audio sounds tinny), and briefly stop video
		 * to avoid unpredictable PCI bus hangs */
		v4l2_subdev_call(itv->sd_audio, audio, s_stream, 1);
		v4l2_subdev_call(itv->sd_video, video, s_stream, 0);
		ivtv_msleep_timeout(300, 0);
		ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
		v4l2_subdev_call(itv->sd_video, video, s_stream, 1);
	}

	
	/* start the capture */
	if (ivtv_vapi(itv, CX2341X_ENC_START_CAPTURE, 2, captype, subtype)) {
		IVTV_DEBUG_WARN("Error starting capture!\n");
		return -EINVAL;
	}

	
	/* restore passthrough mode if it was stopped above */
	if (enable_passthrough) {
		ivtv_passthrough_mode(itv, 1);
	}

	if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
		ivtv_clear_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP);
	else
		ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);

	
	/* capture is now live; data arrives via interrupts */
	atomic_inc(&itv->capturing);
	return 0;
}
Example 4
static int acpi_battery_get_property(struct power_supply *psy,
				     enum power_supply_property psp,
				     union power_supply_propval *val)
{
	int ret = 0;
	struct acpi_battery *battery = to_acpi_battery(psy);

	if (acpi_battery_present(battery)) {
		/* run battery update only if it is present */
		acpi_battery_get_state(battery);
	} else if (psp != POWER_SUPPLY_PROP_PRESENT) {
		return -ENODEV;
	}
	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
		else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
			val->intval = POWER_SUPPLY_STATUS_CHARGING;
		else if (acpi_battery_is_charged(battery))
			val->intval = POWER_SUPPLY_STATUS_FULL;
		else
			val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
		break;
	case POWER_SUPPLY_PROP_PRESENT:
		val->intval = acpi_battery_present(battery);
		break;
	case POWER_SUPPLY_PROP_TECHNOLOGY:
		val->intval = acpi_battery_technology(battery);
		break;
	case POWER_SUPPLY_PROP_CYCLE_COUNT:
		val->intval = battery->cycle_count;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
		if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
			ret = -ENODEV;
		else
			val->intval = battery->design_voltage * 1000;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
			ret = -ENODEV;
		else
			val->intval = battery->voltage_now * 1000;
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
	case POWER_SUPPLY_PROP_POWER_NOW:
		if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
			ret = -ENODEV;
		else
			val->intval = battery->rate_now * 1000;
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
	case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
		if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
			ret = -ENODEV;
		else
			val->intval = battery->design_capacity * 1000;
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL:
	case POWER_SUPPLY_PROP_ENERGY_FULL:
		if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
			ret = -ENODEV;
		else
			val->intval = battery->full_charge_capacity * 1000;
		break;
	case POWER_SUPPLY_PROP_CHARGE_NOW:
	case POWER_SUPPLY_PROP_ENERGY_NOW:
		if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
			ret = -ENODEV;
		else
			val->intval = battery->capacity_now * 1000;
		break;
	case POWER_SUPPLY_PROP_CAPACITY:
		if (battery->capacity_now && battery->full_charge_capacity)
			val->intval = battery->capacity_now * 100 /
					battery->full_charge_capacity;
		else
			val->intval = 0;
		break;
	case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
		if (battery->state & ACPI_BATTERY_STATE_CRITICAL)
			val->intval = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
		else if (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
			(battery->capacity_now <= battery->alarm))
			val->intval = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
		else if (acpi_battery_is_charged(battery))
			val->intval = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
		else
			val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
		break;
	case POWER_SUPPLY_PROP_MODEL_NAME:
		val->strval = battery->model_number;
		break;
	case POWER_SUPPLY_PROP_MANUFACTURER:
		val->strval = battery->oem_info;
		break;
	case POWER_SUPPLY_PROP_SERIAL_NUMBER:
		val->strval = battery->serial_number;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
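
The POWER_SUPPLY_PROP_STATUS case above is a straight priority mapping from the ACPI _BST state bits to power-supply status codes. A standalone rendering of just that mapping, with constants as defined for _BST (bit 0 discharging, bit 1 charging); the kernel's versions live in drivers/acpi/battery.c and <linux/power_supply.h>.

#include <stdio.h>

#define ACPI_BATTERY_STATE_DISCHARGING	0x1
#define ACPI_BATTERY_STATE_CHARGING	0x2

enum status { STATUS_UNKNOWN, STATUS_CHARGING, STATUS_DISCHARGING, STATUS_FULL };

/* is_charged stands in for acpi_battery_is_charged() above */
static enum status battery_status(unsigned int state, int is_charged)
{
	if (state & ACPI_BATTERY_STATE_DISCHARGING)
		return STATUS_DISCHARGING;
	if (state & ACPI_BATTERY_STATE_CHARGING)
		return STATUS_CHARGING;
	if (is_charged)
		return STATUS_FULL;
	return STATUS_UNKNOWN;
}

int main(void)
{
	printf("%d\n", battery_status(ACPI_BATTERY_STATE_DISCHARGING, 0));	/* 2 */
	printf("%d\n", battery_status(0, 1));					/* 3 */
	return 0;
}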
Example 5
int musb_hub_control(
	struct usb_hcd	*hcd,
	u16		typeReq,
	u16		wValue,
	u16		wIndex,
	char		*buf,
	u16		wLength)
{
	struct musb	*musb = hcd_to_musb(hcd);
	u32		temp;
	int		retval = 0;
	unsigned long	flags;

	spin_lock_irqsave(&musb->lock, flags);

	if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) {
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ESHUTDOWN;
	}

	/* hub features:  always zero, setting is a NOP
	 * port features: reported, sometimes updated when host is active
	 * no indicators
	 */
	switch (typeReq) {
	case ClearHubFeature:
	case SetHubFeature:
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			break;
		default:
			goto error;
		}
		break;
	case ClearPortFeature:
		if ((wIndex & 0xff) != 1)
			goto error;

		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			break;
		case USB_PORT_FEAT_SUSPEND:
			musb_port_suspend(musb, false);
			break;
		case USB_PORT_FEAT_POWER:
			if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
				musb_set_vbus(musb, 0);
			break;
		case USB_PORT_FEAT_C_CONNECTION:
		case USB_PORT_FEAT_C_ENABLE:
		case USB_PORT_FEAT_C_OVER_CURRENT:
		case USB_PORT_FEAT_C_RESET:
		case USB_PORT_FEAT_C_SUSPEND:
			break;
		default:
			goto error;
		}
		DBG(5, "clear feature %d\n", wValue);
		musb->port1_status &= ~(1 << wValue);
		break;
	case GetHubDescriptor:
		{
		struct usb_hub_descriptor *desc = (void *)buf;

		desc->bDescLength = 9;
		desc->bDescriptorType = 0x29;
		desc->bNbrPorts = 1;
		desc->wHubCharacteristics = cpu_to_le16(
				  0x0001	/* per-port power switching */
				| 0x0010	/* no overcurrent reporting */
				);
		desc->bPwrOn2PwrGood = 5;	/* msec/2 */
		desc->bHubContrCurrent = 0;

		/* workaround bogus struct definition */
		desc->DeviceRemovable[0] = 0x02;	/* port 1 */
		desc->DeviceRemovable[1] = 0xff;
		}
		break;
	case GetHubStatus:
		temp = 0;
		*(__le32 *) buf = cpu_to_le32(temp);
		break;
	case GetPortStatus:
		if (wIndex != 1)
			goto error;

		/* finish RESET signaling? */
		if ((musb->port1_status & USB_PORT_STAT_RESET)
				&& time_after_eq(jiffies, musb->rh_timer))
			musb_port_reset(musb, false);

		/* finish RESUME signaling? */
		if ((musb->port1_status & MUSB_PORT_STAT_RESUME)
				&& time_after_eq(jiffies, musb->rh_timer)) {
			u8		power;

			power = musb_readb(musb->mregs, MUSB_POWER);
			power &= ~MUSB_POWER_RESUME;
			DBG(4, "root port resume stopped, power %02x\n",
					power);
			musb_writeb(musb->mregs, MUSB_POWER, power);

			/* ISSUE:  DaVinci (RTL 1.300) disconnects after
			 * resume of high speed peripherals (but not full
			 * speed ones).
			 */

			musb->is_active = 1;
			musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
					| MUSB_PORT_STAT_RESUME);
			musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
			usb_hcd_poll_rh_status(musb_to_hcd(musb));
			/* NOTE: it might really be A_WAIT_BCON ... */
			musb->xceiv->state = OTG_STATE_A_HOST;
		}

		put_unaligned(cpu_to_le32(musb->port1_status
					& ~MUSB_PORT_STAT_RESUME),
				(__le32 *) buf);

		/* port change status is more interesting */
		DBG(get_unaligned((u16 *)(buf+2)) ? 2 : 5, "port status %08x\n",
				musb->port1_status);
		break;
	case SetPortFeature:
		if ((wIndex & 0xff) != 1)
			goto error;

		switch (wValue) {
		case USB_PORT_FEAT_POWER:
			/* NOTE: this controller has a strange state machine
			 * that involves "requesting sessions" according to
			 * magic side effects from incompletely-described
			 * rules about startup...
			 *
			 * This call is what really starts the host mode; be
			 * very careful about side effects if you reorder any
			 * initialization logic, e.g. for OTG, or change any
			 * logic relating to VBUS power-up.
			 */
			if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
				musb_start(musb);
			break;
		case USB_PORT_FEAT_RESET:
			musb_port_reset(musb, true);
			break;
		case USB_PORT_FEAT_SUSPEND:
			musb_port_suspend(musb, true);
			break;
		case USB_PORT_FEAT_TEST:
			if (unlikely(is_host_active(musb)))
				goto error;

			wIndex >>= 8;
			switch (wIndex) {
			case 1:
				pr_debug("TEST_J\n");
				temp = MUSB_TEST_J;
				break;
			case 2:
				pr_debug("TEST_K\n");
				temp = MUSB_TEST_K;
				break;
			case 3:
				pr_debug("TEST_SE0_NAK\n");
				temp = MUSB_TEST_SE0_NAK;
				break;
			case 4:
				pr_debug("TEST_PACKET\n");
				temp = MUSB_TEST_PACKET;
				musb_load_testpacket(musb);
				break;
			case 5:
				pr_debug("TEST_FORCE_ENABLE\n");
				temp = MUSB_TEST_FORCE_HOST
					| MUSB_TEST_FORCE_HS;

				musb_writeb(musb->mregs, MUSB_DEVCTL,
						MUSB_DEVCTL_SESSION);
				break;
			case 6:
				pr_debug("TEST_FIFO_ACCESS\n");
				temp = MUSB_TEST_FIFO_ACCESS;
				break;
			default:
				goto error;
			}
			musb_writeb(musb->mregs, MUSB_TESTMODE, temp);
			break;
		default:
			goto error;
		}
		DBG(5, "set feature %d\n", wValue);
		musb->port1_status |= 1 << wValue;
		break;

	default:
error:
		/* "protocol stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&musb->lock, flags);
	return retval;
}
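
musb_hub_control() can get away with port1_status |= 1 << wValue and &= ~(1 << wValue) because the USB 2.0 hub feature selectors are defined to coincide with the bit positions of wPortStatus/wPortChange. A small demonstration of that convention (selector values from the USB spec):

#include <stdio.h>

#define USB_PORT_FEAT_ENABLE	1
#define USB_PORT_FEAT_SUSPEND	2

int main(void)
{
	unsigned int port_status = 0;

	port_status |= 1u << USB_PORT_FEAT_SUSPEND;	/* SetPortFeature */
	printf("status %#x\n", port_status);		/* 0x4 */
	port_status &= ~(1u << USB_PORT_FEAT_SUSPEND);	/* ClearPortFeature */
	printf("status %#x\n", port_status);		/* 0 */
	return 0;
}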
Example 6
ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
{
    struct ivtv_open_id *id = filp->private_data;
    struct ivtv *itv = id->itv;
    struct ivtv_stream *s = &itv->streams[id->type];
    struct yuv_playback_info *yi = &itv->yuv_info;
    struct ivtv_buffer *buf;
    struct ivtv_queue q;
    int bytes_written = 0;
    int mode;
    int rc;
    DEFINE_WAIT(wait);

    IVTV_DEBUG_HI_FILE("write %zd bytes to %s\n", count, s->name);

    if (s->type != IVTV_DEC_STREAM_TYPE_MPG &&
            s->type != IVTV_DEC_STREAM_TYPE_YUV &&
            s->type != IVTV_DEC_STREAM_TYPE_VOUT)
        /* not decoder streams */
        return -EPERM;

    /* Try to claim this stream */
    if (ivtv_claim_stream(id, s->type))
        return -EBUSY;

    /* This stream does not need to start any decoding */
    if (s->type == IVTV_DEC_STREAM_TYPE_VOUT) {
        int elems = count / sizeof(struct v4l2_sliced_vbi_data);

        set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
        ivtv_write_vbi(itv, (const struct v4l2_sliced_vbi_data *)user_buf, elems);
        return elems * sizeof(struct v4l2_sliced_vbi_data);
    }

    mode = s->type == IVTV_DEC_STREAM_TYPE_MPG ? OUT_MPG : OUT_YUV;

    if (ivtv_set_output_mode(itv, mode) != mode) {
        ivtv_release_stream(s);
        return -EBUSY;
    }
    ivtv_queue_init(&q);
    set_bit(IVTV_F_S_APPL_IO, &s->s_flags);

    /* Start decoder (returns 0 if already started) */
    mutex_lock(&itv->serialize_lock);
    rc = ivtv_start_decoding(id, itv->speed);
    mutex_unlock(&itv->serialize_lock);
    if (rc) {
        IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name);

        /* failure, clean up */
        clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
        clear_bit(IVTV_F_S_APPL_IO, &s->s_flags);
        return rc;
    }

retry:
    /* If possible, just DMA the entire frame. Check the data transfer size,
       since we may get here before the stream has been fully set up. */
    if (mode == OUT_YUV && s->q_full.length == 0 && itv->dma_data_req_size) {
        while (count >= itv->dma_data_req_size) {
            if (!ivtv_yuv_udma_stream_frame (itv, (void __user *)user_buf)) {
                bytes_written += itv->dma_data_req_size;
                user_buf += itv->dma_data_req_size;
                count -= itv->dma_data_req_size;
            } else {
                break;
            }
        }
        if (count == 0) {
            IVTV_DEBUG_HI_FILE("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused);
            return bytes_written;
        }
    }

    for (;;) {
        /* Gather buffers */
        while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_io)))
            ivtv_enqueue(s, buf, &q);
        while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_free))) {
            ivtv_enqueue(s, buf, &q);
        }
        if (q.buffers)
            break;
        if (filp->f_flags & O_NONBLOCK)
            return -EAGAIN;
        prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
        /* New buffers might have become free before we were added to the waitqueue */
        if (!s->q_free.buffers)
            schedule();
        finish_wait(&s->waitq, &wait);
        if (signal_pending(current)) {
            IVTV_DEBUG_INFO("User stopped %s\n", s->name);
            return -EINTR;
        }
    }

    /* copy user data into buffers */
    while ((buf = ivtv_dequeue(s, &q))) {
        /* yuv is a pain. Don't copy more data than needed for a single
           frame, otherwise we lose sync with the incoming stream */
        if (s->type == IVTV_DEC_STREAM_TYPE_YUV &&
                yi->stream_size + count > itv->dma_data_req_size)
            rc  = ivtv_buf_copy_from_user(s, buf, user_buf,
                                          itv->dma_data_req_size - yi->stream_size);
        else
            rc = ivtv_buf_copy_from_user(s, buf, user_buf, count);

        /* Make sure we really got all the user data */
        if (rc < 0) {
            ivtv_queue_move(s, &q, NULL, &s->q_free, 0);
            return rc;
        }
        user_buf += rc;
        count -= rc;
        bytes_written += rc;

        if (s->type == IVTV_DEC_STREAM_TYPE_YUV) {
            yi->stream_size += rc;
            /* If we have a complete yuv frame, break loop now */
            if (yi->stream_size == itv->dma_data_req_size) {
                ivtv_enqueue(s, buf, &s->q_full);
                yi->stream_size = 0;
                break;
            }
        }

        if (buf->bytesused != s->buf_size) {
            /* incomplete, leave in q_io for next time */
            ivtv_enqueue(s, buf, &s->q_io);
            break;
        }
        /* Byteswap MPEG buffer */
        if (s->type == IVTV_DEC_STREAM_TYPE_MPG)
            ivtv_buf_swap(buf);
        ivtv_enqueue(s, buf, &s->q_full);
    }

    if (test_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags)) {
        if (s->q_full.length >= itv->dma_data_req_size) {
            int got_sig;

            if (mode == OUT_YUV)
                ivtv_yuv_setup_stream_frame(itv);

            prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
            while (!(got_sig = signal_pending(current)) &&
                    test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) {
                schedule();
            }
            finish_wait(&itv->dma_waitq, &wait);
            if (got_sig) {
                IVTV_DEBUG_INFO("User interrupted %s\n", s->name);
                return -EINTR;
            }

            clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
            ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
            ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 1);
        }
    }
    /* more user data is available, wait until buffers become free
       to transfer the rest. */
    if (count && !(filp->f_flags & O_NONBLOCK))
        goto retry;
    IVTV_DEBUG_HI_FILE("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused);
    return bytes_written;
}
Example 7
static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
{
    struct ivtv *itv = s->itv;
    struct ivtv_open_id *item;

    IVTV_DEBUG_FILE("open %s\n", s->name);

    if (s->type == IVTV_DEC_STREAM_TYPE_MPG &&
            test_bit(IVTV_F_S_CLAIMED, &itv->streams[IVTV_DEC_STREAM_TYPE_YUV].s_flags))
        return -EBUSY;

    if (s->type == IVTV_DEC_STREAM_TYPE_YUV &&
            test_bit(IVTV_F_S_CLAIMED, &itv->streams[IVTV_DEC_STREAM_TYPE_MPG].s_flags))
        return -EBUSY;

    if (s->type == IVTV_DEC_STREAM_TYPE_YUV) {
        if (read_reg(0x82c) == 0) {
            IVTV_ERR("Tried to open YUV output device but need to send data to mpeg decoder before it can be used\n");
            /* return -ENODEV; */
        }
        ivtv_udma_alloc(itv);
    }

    /* Allocate memory */
    item = kmalloc(sizeof(struct ivtv_open_id), GFP_KERNEL);
    if (NULL == item) {
        IVTV_DEBUG_WARN("nomem on v4l2 open\n");
        return -ENOMEM;
    }
    item->itv = itv;
    item->type = s->type;
    v4l2_prio_open(&itv->prio, &item->prio);

    item->open_id = itv->open_id++;
    filp->private_data = item;

    if (item->type == IVTV_ENC_STREAM_TYPE_RAD) {
        /* Try to claim this stream */
        if (ivtv_claim_stream(item, item->type)) {
            /* No, it's already in use */
            kfree(item);
            return -EBUSY;
        }

        if (!test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
            if (atomic_read(&itv->capturing) > 0) {
                /* switching to radio while capture is
                   in progress is not polite */
                ivtv_release_stream(s);
                kfree(item);
                return -EBUSY;
            }
        }
        /* Mark that the radio is being used. */
        set_bit(IVTV_F_I_RADIO_USER, &itv->i_flags);
        /* We have the radio */
        ivtv_mute(itv);
        /* Switch tuner to radio */
        ivtv_call_i2c_clients(itv, AUDC_SET_RADIO, NULL);
        /* Select the correct audio input (i.e. radio tuner) */
        ivtv_audio_set_io(itv);
        if (itv->hw_flags & IVTV_HW_SAA711X)
        {
            struct v4l2_crystal_freq crystal_freq;
            crystal_freq.freq = SAA7115_FREQ_32_11_MHZ;
            crystal_freq.flags = SAA7115_FREQ_FL_APLL;
            ivtv_saa7115(itv, VIDIOC_INT_S_CRYSTAL_FREQ, &crystal_freq);
        }
        /* Done! Unmute and continue. */
        ivtv_unmute(itv);
    }

    /* YUV or MPG Decoding Mode? */
    if (s->type == IVTV_DEC_STREAM_TYPE_MPG) {
        clear_bit(IVTV_F_I_DEC_YUV, &itv->i_flags);
    } else if (s->type == IVTV_DEC_STREAM_TYPE_YUV) {
        set_bit(IVTV_F_I_DEC_YUV, &itv->i_flags);
        /* For yuv, we need to know the dma size before we start */
        itv->dma_data_req_size =
            1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
        itv->yuv_info.stream_size = 0;
    }
    return 0;
}
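
The dma_data_req_size computation above, 1080 * ((v4l2_src_h + 31) & ~31), uses the (x + 31) & ~31 idiom to round the source height up to the next multiple of 32 lines before multiplying by the driver's per-line byte count. A minimal sketch of the idiom, which works for any power-of-two alignment:

#include <stdio.h>

/* round x up to the next multiple of align; align must be a power of two */
static unsigned int round_up_pow2(unsigned int x, unsigned int align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	printf("%u\n", round_up_pow2(480, 32));			/* 480, already aligned */
	printf("%u\n", round_up_pow2(486, 32));			/* 512 */
	printf("%u\n", 1080 * round_up_pow2(486, 32));		/* the YUV DMA size above */
	return 0;
}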
Example 8
static int
id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
{
	int rc = 0;
	struct key *sidkey;
	const struct cred *saved_cred;
	struct cifs_sid *lsid;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	if (sidtype == SIDOWNER) {
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -EINVAL;

	spin_lock(cidlock);
	psidid = sid_rb_search(cidtree, cid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		spin_lock(cidlock);
		psidid = sid_rb_search(cidtree, cid);
		if (psidid) { /* node happened to get inserted meanwhile */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			sid_rb_insert(cidtree, cid, &psidid,
					sidtype == SIDOWNER ? "oi:" : "gi:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/*
	 * If we are here, it is safe to access psidid and its fields
	 * since a reference was taken earlier while holding the spinlock.
	 * A reference on the node is put without holding the spinlock,
	 * which is OK in this case: the shrinker will not erase the node
	 * until all references are put, and we do not access any fields
	 * of the node after a reference is put.
	 */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
		psidid->time = jiffies; /* update ts for accessing */
		goto id_sid_out;
	}

	if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
		rc = -EINVAL;
		goto id_sid_out;
	}

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		saved_cred = override_creds(root_cred);
		sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(sidkey)) {
			rc = -EINVAL;
			cFYI(1, "%s: Can't map and id to a SID", __func__);
		} else {
			lsid = (struct cifs_sid *)sidkey->payload.data;
			memcpy(&psidid->sid, lsid,
				sidkey->datalen < sizeof(struct cifs_sid) ?
				sidkey->datalen : sizeof(struct cifs_sid));
			memcpy(ssid, &psidid->sid,
				sidkey->datalen < sizeof(struct cifs_sid) ?
				sidkey->datalen : sizeof(struct cifs_sid));
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(sidkey);
			kfree(psidid->sidstr);
		}
		psidid->time = jiffies; /* update ts for accessing */
		revert_creds(saved_cred);
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
					__func__, rc);
			--psidid->refcount;
			return rc;
		}
		if (test_bit(SID_ID_MAPPED, &psidid->state))
			memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
		else
			rc = -EINVAL;
	}
id_sid_out:
	--psidid->refcount;
	return rc;
}
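
The rb-tree handling above is a generic lookup-or-insert under a lock: search, and if the entry is missing, drop the lock to allocate, retake it, re-search (another thread may have won the race), then either insert the new node or discard it and take a reference on the winner. A compressed userspace sketch of the same dance, with a pthread mutex standing in for the spinlock and hypothetical map_find()/map_insert() helpers:

#include <pthread.h>
#include <stdlib.h>

struct node {
	unsigned long key;
	int refcount;
	/* ... mapped data ... */
};

/* hypothetical map helpers; caller must hold maplock */
struct node *map_find(unsigned long key);
void map_insert(struct node *n);

static pthread_mutex_t maplock = PTHREAD_MUTEX_INITIALIZER;

static struct node *get_node(unsigned long key)
{
	struct node *n, *newn;

	pthread_mutex_lock(&maplock);
	n = map_find(key);
	if (n) {
		n->refcount++;			/* found: take a reference */
		pthread_mutex_unlock(&maplock);
		return n;
	}
	pthread_mutex_unlock(&maplock);

	newn = calloc(1, sizeof(*newn));	/* allocate with the lock dropped */
	if (!newn)
		return NULL;
	newn->key = key;

	pthread_mutex_lock(&maplock);
	n = map_find(key);			/* someone may have raced us */
	if (n) {
		n->refcount++;
		pthread_mutex_unlock(&maplock);
		free(newn);			/* lost the race: discard ours */
		return n;
	}
	newn->refcount = 1;
	map_insert(newn);
	pthread_mutex_unlock(&maplock);
	return newn;
}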
Example 9
static int
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
		struct cifs_fattr *fattr, uint sidtype)
{
	int rc;
	unsigned long cid;
	struct key *idkey;
	const struct cred *saved_cred;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	if (sidtype == SIDOWNER) {
		cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -ENOENT;

	spin_lock(cidlock);
	psidid = id_rb_search(cidtree, psid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		spin_lock(cidlock);
		psidid = id_rb_search(cidtree, psid);
		if (psidid) { /* node happened to get inserted meanwhile */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			id_rb_insert(cidtree, psid, &psidid,
					sidtype == SIDOWNER ? "os:" : "gs:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/*
	 * If we are here, it is safe to access psidid and its fields
	 * since a reference was taken earlier while holding the spinlock.
	 * A reference on the node is put without holding the spinlock,
	 * which is OK in this case: the shrinker will not erase the node
	 * until all references are put, and we do not access any fields
	 * of the node after a reference is put.
	 */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		cid = psidid->id;
		psidid->time = jiffies; /* update ts for accessing */
		goto sid_to_id_out;
	}

	if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
		goto sid_to_id_out;

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		saved_cred = override_creds(root_cred);
		idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(idkey))
			cFYI(1, "%s: Can't map SID to an id", __func__);
		else {
			cid = *(unsigned long *)idkey->payload.value;
			psidid->id = cid;
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(idkey);
			kfree(psidid->sidstr);
		}
		revert_creds(saved_cred);
		psidid->time = jiffies; /* update ts for accessing */
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
					__func__, rc);
			--psidid->refcount; /* decremented without spinlock */
			return rc;
		}
		if (test_bit(SID_ID_MAPPED, &psidid->state))
			cid = psidid->id;
	}

sid_to_id_out:
	--psidid->refcount; /* decremented without spinlock */
	if (sidtype == SIDOWNER)
		fattr->cf_uid = cid;
	else
		fattr->cf_gid = cid;

	return 0;
}
Example 10
/* This is the common part of the URB message submission code
 *
 * All URBs from the usb-storage driver involved in handling a queued scsi
 * command _must_ pass through this function (or something like it) for the
 * abort mechanisms to work properly.
 */
static int usb_stor_msg_common(struct us_data *us, int timeout)
{
	struct completion urb_done;
	long timeleft;
	int status;

	/* don't submit URBs during abort processing */
	if (test_bit(US_FLIDX_ABORTING, &us->dflags))
		return -EIO;

	/* set up data structures for the wakeup system */
	init_completion(&urb_done);

	/* fill the common fields in the URB */
	us->current_urb->context = &urb_done;
	us->current_urb->transfer_flags = 0;

	/* we assume that if transfer_buffer isn't us->iobuf then it
	 * hasn't been mapped for DMA.  Yes, this is clunky, but it's
	 * easier than always having the caller tell us whether the
	 * transfer buffer has already been mapped. */
	if (us->current_urb->transfer_buffer == us->iobuf)
		us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	us->current_urb->transfer_dma = us->iobuf_dma;

	/* submit the URB */
	status = usb_submit_urb(us->current_urb, GFP_NOIO);
	if (status) {
		/* something went wrong */
		return status;
	}

	/* since the URB has been submitted successfully, it's now okay
	 * to cancel it */
	set_bit(US_FLIDX_URB_ACTIVE, &us->dflags);

	/* did an abort occur during the submission? */
	if (test_bit(US_FLIDX_ABORTING, &us->dflags)) {

		/* cancel the URB, if it hasn't been cancelled already */
		if (test_and_clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags)) {
			usb_stor_dbg(us, "-- cancelling URB\n");
			usb_unlink_urb(us->current_urb);
		}
	}
 
	/* wait for the completion of the URB */
	timeleft = wait_for_completion_interruptible_timeout(
			&urb_done, timeout ? : MAX_SCHEDULE_TIMEOUT);
 
	clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags);

	if (timeleft <= 0) {
		usb_stor_dbg(us, "%s -- cancelling URB\n",
			     timeleft == 0 ? "Timeout" : "Signal");
		usb_kill_urb(us->current_urb);
	}

	/* return the URB status */
	return us->current_urb->status;
}
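
The function above is built around a completion: the URB's completion handler fires complete() on urb_done, and the submitting thread blocks in wait_for_completion_interruptible_timeout(), killing the URB if the wait times out or is interrupted. A rough userspace analogue of that primitive, assuming POSIX threads; the kernel version additionally handles signals and scheduler interaction that this sketch deliberately ignores.

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = false;
}

/* called from the "interrupt" side, like the URB completion handler */
static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

/* returns 1 if completed, 0 on timeout */
static int wait_for_completion_timeout(struct completion *c, int timeout_ms)
{
	struct timespec ts;
	int rc = 0, done;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&c->lock);
	while (!c->done && rc == 0)
		rc = pthread_cond_timedwait(&c->cond, &c->lock, &ts);
	done = c->done;
	pthread_mutex_unlock(&c->lock);
	return done;
}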
Example 11
/* Invoke the transport and basic error-handling/recovery methods
 *
 * This is used by the protocol layers to actually send the message to
 * the device and receive the response.
 */
void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	int need_auto_sense;
	int result;

	/* send the command to the transport layer */
	scsi_set_resid(srb, 0);
	result = us->transport(srb, us);

	/* if the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing
	 */
	if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
		usb_stor_dbg(us, "-- command was aborted\n");
#ifdef CONFIG_USB_DEBUG_DETAILED_LOG
		printk(KERN_ERR "usb storage -- command was aborted\n");
#endif
		srb->result = DID_ABORT << 16;
		goto Handle_Errors;
	}

	/* if there is a transport error, reset and don't auto-sense */
	if (result == USB_STOR_TRANSPORT_ERROR) {
		usb_stor_dbg(us, "-- transport indicates error, resetting\n");
#ifdef CONFIG_USB_DEBUG_DETAILED_LOG
		printk(KERN_ERR "usb storage -- transport indicates error, resetting\n");
#endif
		srb->result = DID_ERROR << 16;
		goto Handle_Errors;
	}

	/* if the transport provided its own sense data, don't auto-sense */
	if (result == USB_STOR_TRANSPORT_NO_SENSE) {
		srb->result = SAM_STAT_CHECK_CONDITION;
		last_sector_hacks(us, srb);
		return;
	}

	srb->result = SAM_STAT_GOOD;

	/* Determine if we need to auto-sense
	 *
	 * I normally don't use a flag like this, but it's almost impossible
	 * to understand what's going on here if I don't.
	 */
	need_auto_sense = 0;

	/*
	 * If we're running the CB transport, which is incapable
	 * of determining status on its own, we will auto-sense
	 * unless the operation involved a data-in transfer.  Devices
	 * can signal most data-in errors by stalling the bulk-in pipe.
	 */
	if ((us->protocol == USB_PR_CB || us->protocol == USB_PR_DPCM_USB) &&
			srb->sc_data_direction != DMA_FROM_DEVICE) {
		usb_stor_dbg(us, "-- CB transport device requiring auto-sense\n");
		need_auto_sense = 1;
	}

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE 
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == USB_STOR_TRANSPORT_FAILED) {
		usb_stor_dbg(us, "-- transport indicates command failure\n");
		need_auto_sense = 1;
	}

	/*
	 * Determine if this device is SAT by seeing if the
	 * command executed successfully.  Otherwise we'll have
	 * to wait for at least one CHECK_CONDITION to determine
	 * SANE_SENSE support
	 */
	if (unlikely((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) &&
	    result == USB_STOR_TRANSPORT_GOOD &&
	    !(us->fflags & US_FL_SANE_SENSE) &&
	    !(us->fflags & US_FL_BAD_SENSE) &&
	    !(srb->cmnd[2] & 0x20))) {
		usb_stor_dbg(us, "-- SAT supported, increasing auto-sense\n");
		us->fflags |= US_FL_SANE_SENSE;
	}

	/*
	 * A short transfer on a command where we don't expect it
	 * is unusual, but it doesn't mean we need to auto-sense.
	 */
	if ((scsi_get_resid(srb) > 0) &&
	    !((srb->cmnd[0] == REQUEST_SENSE) ||
	      (srb->cmnd[0] == INQUIRY) ||
	      (srb->cmnd[0] == MODE_SENSE) ||
	      (srb->cmnd[0] == LOG_SENSE) ||
	      (srb->cmnd[0] == MODE_SENSE_10))) {
		usb_stor_dbg(us, "-- unexpectedly short transfer\n");
	}

	/* Now, if we need to do the auto-sense, let's do it */
	if (need_auto_sense) {
		int temp_result;
		struct scsi_eh_save ses;
		int sense_size = US_SENSE_SIZE;
		struct scsi_sense_hdr sshdr;
		const u8 *scdd;
		u8 fm_ili;

		/* device supports and needs bigger sense buffer */
		if (us->fflags & US_FL_SANE_SENSE)
			sense_size = ~0;
Retry_Sense:
		usb_stor_dbg(us, "Issuing auto-REQUEST_SENSE\n");

		scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sense_size);

		/* FIXME: we must do the protocol translation here */
		if (us->subclass == USB_SC_RBC || us->subclass == USB_SC_SCSI ||
				us->subclass == USB_SC_CYP_ATACB)
			srb->cmd_len = 6;
		else
			srb->cmd_len = 12;

		/* issue the auto-sense command */
		scsi_set_resid(srb, 0);
		temp_result = us->transport(us->srb, us);

		/* let's clean up right away */
		scsi_eh_restore_cmnd(srb, &ses);

		if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
			usb_stor_dbg(us, "-- auto-sense aborted\n");
#ifdef CONFIG_USB_DEBUG_DETAILED_LOG
			printk(KERN_ERR "usb storage -- auto-sense aborted\n");
#endif
			srb->result = DID_ABORT << 16;

			/* If SANE_SENSE caused this problem, disable it */
			if (sense_size != US_SENSE_SIZE) {
				us->fflags &= ~US_FL_SANE_SENSE;
				us->fflags |= US_FL_BAD_SENSE;
			}
			goto Handle_Errors;
		}

		/* Some devices claim to support larger sense but fail when
		 * trying to request it. When a transport failure happens
		 * using US_FL_SANE_SENSE, we always retry with a standard
		 * (small) sense request. This fixes some USB GSM modems.
		 */
		if (temp_result == USB_STOR_TRANSPORT_FAILED &&
				sense_size != US_SENSE_SIZE) {
			usb_stor_dbg(us, "-- auto-sense failure, retry small sense\n");
			sense_size = US_SENSE_SIZE;
			us->fflags &= ~US_FL_SANE_SENSE;
			us->fflags |= US_FL_BAD_SENSE;
			goto Retry_Sense;
		}

		/* Other failures */
		if (temp_result != USB_STOR_TRANSPORT_GOOD) {
			usb_stor_dbg(us, "-- auto-sense failure\n");
#ifdef CONFIG_USB_DEBUG_DETAILED_LOG
			printk(KERN_ERR "usb storage -- auto-sense failure\n");
#endif

			/* we skip the reset if this happens to be a
			 * multi-target device, since failure of an
			 * auto-sense is perfectly valid
			 */
			srb->result = DID_ERROR << 16;
			if (!(us->fflags & US_FL_SCM_MULT_TARG))
				goto Handle_Errors;
			return;
		}

		/* If the sense data returned is larger than 18 bytes, then we
		 * assume this device supports requesting more in the future.
		 * The response code must be 70h through 73h inclusive.
		 */
		if (srb->sense_buffer[7] > (US_SENSE_SIZE - 8) &&
		    !(us->fflags & US_FL_SANE_SENSE) &&
		    !(us->fflags & US_FL_BAD_SENSE) &&
		    (srb->sense_buffer[0] & 0x7C) == 0x70) {
			usb_stor_dbg(us, "-- SANE_SENSE support enabled\n");
			us->fflags |= US_FL_SANE_SENSE;

			/* Indicate to the user that we truncated their sense
			 * because we didn't know it supported larger sense.
			 */
			usb_stor_dbg(us, "-- Sense data truncated to %i from %i\n",
				     US_SENSE_SIZE,
				     srb->sense_buffer[7] + 8);
			srb->sense_buffer[7] = (US_SENSE_SIZE - 8);
		}

		scsi_normalize_sense(srb->sense_buffer, SCSI_SENSE_BUFFERSIZE,
				     &sshdr);

		usb_stor_dbg(us, "-- Result from auto-sense is %d\n",
			     temp_result);
		usb_stor_dbg(us, "-- code: 0x%x, key: 0x%x, ASC: 0x%x, ASCQ: 0x%x\n",
			     sshdr.response_code, sshdr.sense_key,
			     sshdr.asc, sshdr.ascq);
#ifdef CONFIG_USB_STORAGE_DEBUG
		usb_stor_show_sense(us, sshdr.sense_key, sshdr.asc, sshdr.ascq);
#endif

		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;

		scdd = scsi_sense_desc_find(srb->sense_buffer,
					    SCSI_SENSE_BUFFERSIZE, 4);
		fm_ili = (scdd ? scdd[3] : srb->sense_buffer[2]) & 0xA0;

		/* We often get empty sense data.  This could indicate that
		 * everything worked or that there was an unspecified
		 * problem.  We have to decide which.
		 */
		if (sshdr.sense_key == 0 && sshdr.asc == 0 && sshdr.ascq == 0 &&
		    fm_ili == 0) {
			/* If things are really okay, then let's show that.
			 * Zero out the sense buffer so the higher layers
			 * won't realize we did an unsolicited auto-sense.
			 */
			if (result == USB_STOR_TRANSPORT_GOOD) {
				srb->result = SAM_STAT_GOOD;
				srb->sense_buffer[0] = 0x0;

			/* If there was a problem, report an unspecified
			 * hardware error to prevent the higher layers from
			 * entering an infinite retry loop.
			 */
			} else {
				srb->result = DID_ERROR << 16;
				if ((sshdr.response_code & 0x72) == 0x72)
					srb->sense_buffer[1] = HARDWARE_ERROR;
				else
					srb->sense_buffer[2] = HARDWARE_ERROR;
			}
		}
	}

	/*
	 * Some devices don't work or return incorrect data the first
	 * time they get a READ(10) command, or for the first READ(10)
	 * after a media change.  If the INITIAL_READ10 flag is set,
	 * keep track of whether READ(10) commands succeed.  If the
	 * previous one succeeded and this one failed, set the REDO_READ10
	 * flag to force a retry.
	 */
	if (unlikely((us->fflags & US_FL_INITIAL_READ10) &&
			srb->cmnd[0] == READ_10)) {
		if (srb->result == SAM_STAT_GOOD) {
			set_bit(US_FLIDX_READ10_WORKED, &us->dflags);
		} else if (test_bit(US_FLIDX_READ10_WORKED, &us->dflags)) {
			clear_bit(US_FLIDX_READ10_WORKED, &us->dflags);
			set_bit(US_FLIDX_REDO_READ10, &us->dflags);
		}

		/*
		 * Next, if the REDO_READ10 flag is set, return a result
		 * code that will cause the SCSI core to retry the READ(10)
		 * command immediately.
		 */
		if (test_bit(US_FLIDX_REDO_READ10, &us->dflags)) {
			clear_bit(US_FLIDX_REDO_READ10, &us->dflags);
			srb->result = DID_IMM_RETRY << 16;
			srb->sense_buffer[0] = 0;
		}
	}

	/* Did we transfer less than the minimum amount required? */
	if ((srb->result == SAM_STAT_GOOD || srb->sense_buffer[2] == 0) &&
			scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow)
		srb->result = DID_ERROR << 16;

	last_sector_hacks(us, srb);
	return;

	/* Error and abort processing: try to resynchronize with the device
	 * by issuing a port reset.  If that fails, try a class-specific
	 * device reset. */
  Handle_Errors:

	/* Set the RESETTING bit, and clear the ABORTING bit so that
	 * the reset may proceed. */
	scsi_lock(us_to_host(us));
	set_bit(US_FLIDX_RESETTING, &us->dflags);
	clear_bit(US_FLIDX_ABORTING, &us->dflags);
	scsi_unlock(us_to_host(us));

	/* We must release the device lock because the pre_reset routine
	 * will want to acquire it. */
	mutex_unlock(&us->dev_mutex);
#ifdef CONFIG_USB_STORAGE_DETECT
	msleep(200);
#endif
	result = usb_stor_port_reset(us);
	mutex_lock(&us->dev_mutex);

	if (result < 0) {
		scsi_lock(us_to_host(us));
		usb_stor_report_device_reset(us);
		scsi_unlock(us_to_host(us));
		us->transport_reset(us);
	}
	clear_bit(US_FLIDX_RESETTING, &us->dflags);
	last_sector_hacks(us, srb);
}
Example 12
int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
{
	struct ath6kl_bmi_target_info targ_info;
	struct wireless_dev *wdev;
	int ret = 0, i;

	switch (htc_type) {
	case ATH6KL_HTC_TYPE_MBOX:
		ath6kl_htc_mbox_attach(ar);
		break;
	case ATH6KL_HTC_TYPE_PIPE:
		ath6kl_htc_pipe_attach(ar);
		break;
	default:
		WARN_ON(1);
		return -ENOMEM;
	}

	ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
	if (!ar->ath6kl_wq)
		return -ENOMEM;

	ret = ath6kl_bmi_init(ar);
	if (ret)
		goto err_wq;

	/*
	 * Turn on power to get the hardware (target) version and leave power
	 * on deliberately, as we will boot the hardware anyway within a few
	 * seconds.
	 */
	ret = ath6kl_hif_power_on(ar);
	if (ret)
		goto err_bmi_cleanup;

	ret = ath6kl_bmi_get_target_info(ar, &targ_info);
	if (ret)
		goto err_power_off;

	ar->version.target_ver = le32_to_cpu(targ_info.version);
	ar->target_type = le32_to_cpu(targ_info.type);
	ar->wiphy->hw_version = le32_to_cpu(targ_info.version);

	ret = ath6kl_init_hw_params(ar);
	if (ret)
		goto err_power_off;

	ar->htc_target = ath6kl_htc_create(ar);

	if (!ar->htc_target) {
		ret = -ENOMEM;
		goto err_power_off;
	}

	ar->testmode = testmode;

	ret = ath6kl_init_fetch_firmwares(ar);
	if (ret)
		goto err_htc_cleanup;

	/* FIXME: we should free all firmwares in the error cases below */

	/* Indicate that WMI is enabled (although not ready yet) */
	set_bit(WMI_ENABLED, &ar->flag);
	ar->wmi = ath6kl_wmi_init(ar);
	if (!ar->wmi) {
		ath6kl_err("failed to initialize wmi\n");
		ret = -EIO;
		goto err_htc_cleanup;
	}

	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);

	/* setup access class priority mappings */
	ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest  */
	ar->ac_stream_pri_map[WMM_AC_BE] = 1;
	ar->ac_stream_pri_map[WMM_AC_VI] = 2;
	ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */

	/* allocate some buffers that handle larger AMSDU frames */
	ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);

	ath6kl_cookie_init(ar);

	ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
			 ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;

	if (suspend_mode &&
	    suspend_mode >= WLAN_POWER_STATE_CUT_PWR &&
	    suspend_mode <= WLAN_POWER_STATE_WOW)
		ar->suspend_mode = suspend_mode;
	else
		ar->suspend_mode = 0;

	if (suspend_mode == WLAN_POWER_STATE_WOW &&
	    (wow_mode == WLAN_POWER_STATE_CUT_PWR ||
	     wow_mode == WLAN_POWER_STATE_DEEP_SLEEP))
		ar->wow_suspend_mode = wow_mode;
	else
		ar->wow_suspend_mode = 0;

	if (uart_debug)
		ar->conf_flags |= ATH6KL_CONF_UART_DEBUG;

	set_bit(FIRST_BOOT, &ar->flag);

	ath6kl_debug_init(ar);

	ret = ath6kl_init_hw_start(ar);
	if (ret) {
		ath6kl_err("Failed to start hardware: %d\n", ret);
		goto err_rxbuf_cleanup;
	}

	/* give our connected endpoints some buffers */
	ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
	ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);

	ret = ath6kl_cfg80211_init(ar);
	if (ret)
		goto err_rxbuf_cleanup;

	ret = ath6kl_debug_init_fs(ar);
	if (ret) {
		wiphy_unregister(ar->wiphy);
		goto err_rxbuf_cleanup;
	}

	for (i = 0; i < ar->vif_max; i++)
		ar->avail_idx_map |= BIT(i);

	rtnl_lock();

	/* Add an initial station interface */
	wdev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
				    INFRA_NETWORK);

	rtnl_unlock();

	if (!wdev) {
		ath6kl_err("Failed to instantiate a network device\n");
		ret = -ENOMEM;
		wiphy_unregister(ar->wiphy);
		goto err_rxbuf_cleanup;
	}

	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
		   __func__, wdev->netdev->name, wdev->netdev, ar);

	ar->fw_recovery.enable = !!recovery_enable;
	if (!ar->fw_recovery.enable)
		return ret;

	if (heart_beat_poll &&
	    test_bit(ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL,
		     ar->fw_capabilities))
		ar->fw_recovery.hb_poll = heart_beat_poll;

	ath6kl_recovery_init(ar);

	return ret;

err_rxbuf_cleanup:
	ath6kl_debug_cleanup(ar);
	ath6kl_htc_flush_rx_buf(ar->htc_target);
	ath6kl_cleanup_amsdu_rxbufs(ar);
	ath6kl_wmi_shutdown(ar->wmi);
	clear_bit(WMI_ENABLED, &ar->flag);
	ar->wmi = NULL;
err_htc_cleanup:
	ath6kl_htc_cleanup(ar->htc_target);
err_power_off:
	ath6kl_hif_power_off(ar);
err_bmi_cleanup:
	ath6kl_bmi_cleanup(ar);
err_wq:
	destroy_workqueue(ar->ath6kl_wq);

	return ret;
}
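
ath6kl_core_init() is a textbook example of the kernel's goto-unwind error-handling ladder: each setup step falls through on success, and each failure jumps to the label that tears down exactly what has already been set up, in reverse order. The skeleton of that shape, with hypothetical setup and teardown helpers standing in for the real init steps:

static int setup_a(void);
static int setup_b(void);
static int setup_c(void);
static void teardown_a(void);
static void teardown_b(void);

static int init_device(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = setup_b();
	if (ret)
		goto err_a;

	ret = setup_c();
	if (ret)
		goto err_b;

	return 0;			/* fully initialized */

err_b:					/* unwind in reverse order */
	teardown_b();
err_a:
	teardown_a();
	return ret;
}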
Example 13
static int __init ehci_hcd_init(void)
{
	int retval = 0;

	if (usb_disabled())
		return -ENODEV;

	printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
	set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
	if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
			test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
		printk(KERN_WARNING "Warning! ehci_hcd should always be loaded"
				" before uhci_hcd and ohci_hcd, not after\n");

	pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n",
		 hcd_name,
		 sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
		 sizeof(struct ehci_itd), sizeof(struct ehci_sitd));

#ifdef DEBUG
	ehci_debug_root = debugfs_create_dir("ehci", NULL);
	if (!ehci_debug_root) {
		retval = -ENOENT;
		goto err_debug;
	}
#endif

#ifdef PLATFORM_DRIVER
pr_debug("#ifdef PLATFORM_DRIVER\n");
	retval = platform_driver_register(&PLATFORM_DRIVER);
	if (retval < 0)
		goto clean0;
#endif

#ifdef PCI_DRIVER
	retval = pci_register_driver(&PCI_DRIVER);
	if (retval < 0)
		goto clean1;
#endif

#ifdef PS3_SYSTEM_BUS_DRIVER
	retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
	if (retval < 0)
		goto clean2;
#endif

#ifdef OF_PLATFORM_DRIVER
	retval = of_register_platform_driver(&OF_PLATFORM_DRIVER);
	if (retval < 0)
		goto clean3;
#endif
	return retval;

#ifdef OF_PLATFORM_DRIVER
	/* of_unregister_platform_driver(&OF_PLATFORM_DRIVER); */
clean3:
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
	ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
clean2:
#endif
#ifdef PCI_DRIVER
	pci_unregister_driver(&PCI_DRIVER);
clean1:
#endif
#ifdef PLATFORM_DRIVER
	platform_driver_unregister(&PLATFORM_DRIVER);
clean0:
#endif
#ifdef DEBUG
	debugfs_remove(ehci_debug_root);
	ehci_debug_root = NULL;
err_debug:
#endif
	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
	return retval;
}
Example 14
static irqreturn_t ehci_irq (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	u32			status, masked_status, pcd_status = 0, cmd;
	int			bh;

	spin_lock (&ehci->lock);

	status = ehci_readl(ehci, &ehci->regs->status);

	/* e.g. cardbus physical eject */
	if (status == ~(u32) 0) {
		ehci_dbg (ehci, "device removed\n");
		goto dead;
	}

	masked_status = status & INTR_MASK;
	if (!masked_status) {		/* irq sharing? */
		spin_unlock(&ehci->lock);
		return IRQ_NONE;
	}

	/* clear (just) interrupts */
	ehci_writel(ehci, masked_status, &ehci->regs->status);
	cmd = ehci_readl(ehci, &ehci->regs->command);
	bh = 0;

#ifdef	VERBOSE_DEBUG
	/* unrequested/ignored: Frame List Rollover */
	dbg_status (ehci, "irq", status);
#endif

	/* INT, ERR, and IAA interrupt rates can be throttled */

	/* normal [4.15.1.2] or error [4.15.1.1] completion */
	if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
		if (likely ((status & STS_ERR) == 0))
			COUNT (ehci->stats.normal);
		else
			COUNT (ehci->stats.error);
		bh = 1;
	}

	/* complete the unlinking of some qh [4.15.2.3] */
	if (status & STS_IAA) {
		/* guard against (alleged) silicon errata */
		if (cmd & CMD_IAAD) {
			ehci_writel(ehci, cmd & ~CMD_IAAD,
					&ehci->regs->command);
			ehci_dbg(ehci, "IAA with IAAD still set?\n");
		}
		if (ehci->reclaim) {
			COUNT(ehci->stats.reclaim);
			end_unlink_async(ehci);
		} else
			ehci_dbg(ehci, "IAA with nothing to reclaim?\n");
	}

	/* remote wakeup [4.3.1] */
	if (status & STS_PCD) {
		unsigned	i = HCS_N_PORTS (ehci->hcs_params);

		/* kick root hub later */
		pcd_status = status;

		/* resume root hub? */
		if (!(cmd & CMD_RUN))
			usb_hcd_resume_root_hub(hcd);

		while (i--) {
			int pstatus = ehci_readl(ehci,
						 &ehci->regs->port_status [i]);

			if (pstatus & PORT_OWNER)
				continue;
			if (!(test_bit(i, &ehci->suspended_ports) &&
					((pstatus & PORT_RESUME) ||
						!(pstatus & PORT_SUSPEND)) &&
					(pstatus & PORT_PE) &&
					ehci->reset_done[i] == 0))
				continue;

			/* start 20 msec resume signaling from this port,
			 * and make khubd collect PORT_STAT_C_SUSPEND to
			 * stop that signaling.
			 */
			ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
			mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
		}
	}

	/* PCI errors [4.15.2.4] */
	if (unlikely ((status & STS_FATAL) != 0)) {
		ehci_err(ehci, "fatal error\n");
		dbg_cmd(ehci, "fatal", cmd);
		dbg_status(ehci, "fatal", status);
		ehci_halt(ehci);
dead:
		ehci_reset(ehci);
		ehci_writel(ehci, 0, &ehci->regs->configured_flag);
		/* generic layer kills/unlinks all urbs, then
		 * uses ehci_stop to clean up the rest
		 */
		bh = 1;
	}

	if (bh)
		ehci_work (ehci);
	spin_unlock (&ehci->lock);
	if (pcd_status)
		usb_hcd_poll_rh_status(hcd);
	return IRQ_HANDLED;
}
Example 15
static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block, int *err)
{
    struct ivtv *itv = s->itv;
    struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
    struct ivtv_buffer *buf;
    DEFINE_WAIT(wait);

    *err = 0;
    while (1) {
        if (s->type == IVTV_ENC_STREAM_TYPE_MPG) {
            /* Process pending program info updates and pending VBI data */
            ivtv_update_pgm_info(itv);

            if (time_after(jiffies,
                           itv->dualwatch_jiffies +
                           msecs_to_jiffies(1000))) {
                itv->dualwatch_jiffies = jiffies;
                ivtv_dualwatch(itv);
            }

            if (test_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
                    !test_bit(IVTV_F_S_APPL_IO, &s_vbi->s_flags)) {
                while ((buf = ivtv_dequeue(s_vbi, &s_vbi->q_full))) {
                    /* byteswap and process VBI data */
                    ivtv_process_vbi_data(itv, buf, s_vbi->dma_pts, s_vbi->type);
                    ivtv_enqueue(s_vbi, buf, &s_vbi->q_free);
                }
            }
            buf = &itv->vbi.sliced_mpeg_buf;
            if (buf->readpos != buf->bytesused) {
                return buf;
            }
        }

        /* do we have leftover data? */
        buf = ivtv_dequeue(s, &s->q_io);
        if (buf)
            return buf;

        /* do we have new data? */
        buf = ivtv_dequeue(s, &s->q_full);
        if (buf) {
            if ((buf->b_flags & IVTV_F_B_NEED_BUF_SWAP) == 0)
                return buf;
            buf->b_flags &= ~IVTV_F_B_NEED_BUF_SWAP;
            if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
                /* byteswap MPG data */
                ivtv_buf_swap(buf);
            else if (s->type != IVTV_DEC_STREAM_TYPE_VBI) {
                /* byteswap and process VBI data */
                ivtv_process_vbi_data(itv, buf, s->dma_pts, s->type);
            }
            return buf;
        }

        /* return if end of stream */
        if (s->type != IVTV_DEC_STREAM_TYPE_VBI && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
            IVTV_DEBUG_INFO("EOS %s\n", s->name);
            return NULL;
        }

        /* return if file was opened with O_NONBLOCK */
        if (non_block) {
            *err = -EAGAIN;
            return NULL;
        }

        /* wait for more data to arrive */
        prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
        /* New buffers might have become available before we were added to the waitqueue */
        if (!s->q_full.buffers)
            schedule();
        finish_wait(&s->waitq, &wait);
        if (signal_pending(current)) {
            /* return if a signal was received */
            IVTV_DEBUG_INFO("User stopped %s\n", s->name);
            *err = -EINTR;
            return NULL;
        }
    }
}
Example 16
/* size of returned buffer is part of USB spec */
static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
			u16 wIndex, char *buf, u16 wLength)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int status, lstatus, retval = 0;
	unsigned int port = wIndex - 1;
	unsigned long port_addr = USBPORTSC1 + 2 * port;
	u16 wPortChange, wPortStatus;
	unsigned long flags;

	if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
		return -ETIMEDOUT;

	spin_lock_irqsave(&uhci->lock, flags);
	switch (typeReq) {

	case GetHubStatus:
		*(__le32 *)buf = cpu_to_le32(0);
		retval = 4; /* hub power */
		break;
	case GetPortStatus:
		if (port >= uhci->rh_numports)
			goto err;

		uhci_check_ports(uhci);
		status = uhci_readw(uhci, port_addr);

		/* Intel controllers report the OverCurrent bit active on.
		 * VIA controllers report it active off, so we'll adjust the
		 * bit value.  (It's not standardized in the UHCI spec.)
		 */
		if (uhci->oc_low)
			status ^= USBPORTSC_OC;

		/* UHCI doesn't support C_RESET (always false) */
		wPortChange = lstatus = 0;
		if (status & USBPORTSC_CSC)
			wPortChange |= USB_PORT_STAT_C_CONNECTION;
		if (status & USBPORTSC_PEC)
			wPortChange |= USB_PORT_STAT_C_ENABLE;
		if ((status & USBPORTSC_OCC) && !ignore_oc)
			wPortChange |= USB_PORT_STAT_C_OVERCURRENT;

		if (test_bit(port, &uhci->port_c_suspend)) {
			wPortChange |= USB_PORT_STAT_C_SUSPEND;
			lstatus |= 1;
		}
		if (test_bit(port, &uhci->resuming_ports))
			lstatus |= 4;

		/* UHCI has no power switching (always on) */
		wPortStatus = USB_PORT_STAT_POWER;
		if (status & USBPORTSC_CCS)
			wPortStatus |= USB_PORT_STAT_CONNECTION;
		if (status & USBPORTSC_PE) {
			wPortStatus |= USB_PORT_STAT_ENABLE;
			if (status & SUSPEND_BITS)
				wPortStatus |= USB_PORT_STAT_SUSPEND;
		}
		if (status & USBPORTSC_OC)
			wPortStatus |= USB_PORT_STAT_OVERCURRENT;
		if (status & USBPORTSC_PR)
			wPortStatus |= USB_PORT_STAT_RESET;
		if (status & USBPORTSC_LSDA)
			wPortStatus |= USB_PORT_STAT_LOW_SPEED;

		if (wPortChange)
			dev_dbg(uhci_dev(uhci), "port %d portsc %04x,%02x\n",
					wIndex, status, lstatus);

		*(__le16 *)buf = cpu_to_le16(wPortStatus);
		*(__le16 *)(buf + 2) = cpu_to_le16(wPortChange);
		retval = 4;
		break;
	case SetHubFeature:		/* We don't implement these */
	case ClearHubFeature:
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			break;
		default:
			goto err;
		}
		break;
	case SetPortFeature:
		if (port >= uhci->rh_numports)
			goto err;

		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			SET_RH_PORTSTAT(USBPORTSC_SUSP);
			break;
		case USB_PORT_FEAT_RESET:
			SET_RH_PORTSTAT(USBPORTSC_PR);

			/* Reset terminates Resume signalling */
			uhci_finish_suspend(uhci, port, port_addr);

			/* USB v2.0 7.1.7.5 */
			uhci->ports_timeout = jiffies +
				msecs_to_jiffies(USB_RESUME_TIMEOUT);
			break;
		case USB_PORT_FEAT_POWER:
			/* UHCI has no power switching */
			break;
		default:
			goto err;
		}
		break;
	case ClearPortFeature:
		if (port >= uhci->rh_numports)
			goto err;

		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			CLR_RH_PORTSTAT(USBPORTSC_PE);

			/* Disable terminates Resume signalling */
			uhci_finish_suspend(uhci, port, port_addr);
			break;
		case USB_PORT_FEAT_C_ENABLE:
			CLR_RH_PORTSTAT(USBPORTSC_PEC);
			break;
		case USB_PORT_FEAT_SUSPEND:
			if (!(uhci_readw(uhci, port_addr) & USBPORTSC_SUSP)) {

				/* Make certain the port isn't suspended */
				uhci_finish_suspend(uhci, port, port_addr);
			} else if (!test_and_set_bit(port,
						&uhci->resuming_ports)) {
				SET_RH_PORTSTAT(USBPORTSC_RD);

				/* The controller won't allow RD to be set
				 * if the port is disabled.  When this happens
				 * just skip the Resume signalling.
				 */
				if (!(uhci_readw(uhci, port_addr) &
						USBPORTSC_RD))
					uhci_finish_suspend(uhci, port,
							port_addr);
				else
					/* USB v2.0 7.1.7.7 */
					uhci->ports_timeout = jiffies +
						msecs_to_jiffies(20);
			}
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			clear_bit(port, &uhci->port_c_suspend);
			break;
		case USB_PORT_FEAT_POWER:
			/* UHCI has no power switching */
			goto err;
		case USB_PORT_FEAT_C_CONNECTION:
			CLR_RH_PORTSTAT(USBPORTSC_CSC);
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			CLR_RH_PORTSTAT(USBPORTSC_OCC);
			break;
		case USB_PORT_FEAT_C_RESET:
			/* this driver won't report these */
			break;
		default:
			goto err;
		}
		break;
	case GetHubDescriptor:
		retval = min_t(unsigned int, sizeof(root_hub_hub_des), wLength);
		memcpy(buf, root_hub_hub_des, retval);
		if (retval > 2)
			buf[2] = uhci->rh_numports;
		break;
	default:
err:
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&uhci->lock, flags);

	return retval;
}
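For reference, the GetPortStatus reply built above is simply wPortStatus followed by wPortChange, each a little-endian 16-bit word in a 4-byte buffer. A standalone sketch of that layout, with cpu_to_le16() replaced by explicit byte math so it works on any host endianness (illustrative only):

#include <stdint.h>
#include <stdio.h>

static void pack_port_status(uint8_t buf[4], uint16_t wPortStatus,
			     uint16_t wPortChange)
{
	buf[0] = wPortStatus & 0xff;	/* wPortStatus, little-endian */
	buf[1] = wPortStatus >> 8;
	buf[2] = wPortChange & 0xff;	/* wPortChange, little-endian */
	buf[3] = wPortChange >> 8;
}

int main(void)
{
	/* e.g. a powered, connected port with a connect-status change */
	uint8_t buf[4];

	pack_port_status(buf, 0x0101, 0x0001);
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}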
Example no. 17
0
int ivtv_start_capture(struct ivtv_open_id *id)
{
    struct ivtv *itv = id->itv;
    struct ivtv_stream *s = &itv->streams[id->type];
    struct ivtv_stream *s_vbi;

    if (s->type == IVTV_ENC_STREAM_TYPE_RAD ||
            s->type == IVTV_DEC_STREAM_TYPE_MPG ||
            s->type == IVTV_DEC_STREAM_TYPE_YUV ||
            s->type == IVTV_DEC_STREAM_TYPE_VOUT) {
        /* you cannot read from these stream types. */
        return -EPERM;
    }

    /* Try to claim this stream. */
    if (ivtv_claim_stream(id, s->type))
        return -EBUSY;

    /* This stream does not need to start capturing */
    if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
        set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
        return 0;
    }

    /* If capture is already in progress, then we also have to
       do nothing extra. */
    if (test_bit(IVTV_F_S_STREAMOFF, &s->s_flags) || test_and_set_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
        set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
        return 0;
    }

    /* Start VBI capture if required */
    s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
    if (s->type == IVTV_ENC_STREAM_TYPE_MPG &&
            test_bit(IVTV_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
            !test_and_set_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags)) {
        /* Note: the IVTV_ENC_STREAM_TYPE_VBI is claimed
           automatically when the MPG stream is claimed.
           We only need to start the VBI capturing. */
        if (ivtv_start_v4l2_encode_stream(s_vbi)) {
            IVTV_DEBUG_WARN("VBI capture start failed\n");

            /* Failure, clean up and return an error */
            clear_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags);
            clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
            /* also releases the associated VBI stream */
            ivtv_release_stream(s);
            return -EIO;
        }
        IVTV_DEBUG_INFO("VBI insertion started\n");
    }

    /* Tell the card to start capturing */
    if (!ivtv_start_v4l2_encode_stream(s)) {
        /* We're done */
        set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
        /* Resume a possibly paused encoder */
        if (test_and_clear_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags))
            ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 1);
        return 0;
    }

    /* failure, clean up */
    IVTV_DEBUG_WARN("Failed to start capturing for stream %s\n", s->name);

    /* Note: the IVTV_ENC_STREAM_TYPE_VBI is released
       automatically when the MPG stream is released.
       We only need to stop the VBI capturing. */
    if (s->type == IVTV_ENC_STREAM_TYPE_MPG &&
            test_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags)) {
        ivtv_stop_v4l2_encode_stream(s_vbi, 0);
        clear_bit(IVTV_F_S_STREAMING, &s_vbi->s_flags);
    }
    clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
    ivtv_release_stream(s);
    return -EIO;
}
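The test_and_set_bit(IVTV_F_S_STREAMING, ...) call above doubles as a claim: only the caller that saw the bit clear proceeds to start the hardware. A minimal userspace sketch of the same idiom with C11 atomics (the same shape, not the kernel's bitops):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong s_flags;

/* returns the previous value of the bit, like test_and_set_bit() */
static bool test_and_set_bit_u(int nr, atomic_ulong *addr)
{
	unsigned long mask = 1UL << nr;

	return (atomic_fetch_or(addr, mask) & mask) != 0;
}

int main(void)
{
	enum { F_STREAMING = 0 };

	if (!test_and_set_bit_u(F_STREAMING, &s_flags))
		puts("we claimed the stream, start capturing");
	else
		puts("already streaming, nothing to do");
	return 0;
}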
Example no. 18
0
/**
 * wlan_logging_thread() - The WLAN Logger thread
 * @Arg - pointer to the HDD context
 *
 * This thread sends log messages to the app registered for the logs.
 */
static int wlan_logging_thread(void *Arg)
{
	int ret_wait_status = 0;
	int ret = 0;

	set_user_nice(current, -2);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	daemonize("wlan_logging_thread");
#endif

	while (!gwlan_logging.exit) {
		ret_wait_status = wait_event_interruptible(
		    gwlan_logging.wait_queue,
		    (!list_empty(&gwlan_logging.filled_list)
		  || test_bit(HOST_LOG_DRIVER_MSG, &gwlan_logging.eventFlag)
		  || test_bit(HOST_LOG_PER_PKT_STATS,
		     &gwlan_logging.eventFlag)
		  || test_bit(HOST_LOG_FW_FLUSH_COMPLETE,
		     &gwlan_logging.eventFlag)
		  || gwlan_logging.exit));

		if (ret_wait_status == -ERESTARTSYS) {
			pr_err("%s: wait_event_interruptible returned -ERESTARTSYS",
				__func__);
			break;
		}

		if (gwlan_logging.exit) {
			pr_err("%s: Exiting the thread\n", __func__);
			break;
		}

		if (test_and_clear_bit(HOST_LOG_DRIVER_MSG,
				       &gwlan_logging.eventFlag)) {
			ret = send_filled_buffers_to_user();
			if (-ENOMEM == ret) {
				msleep(200);
			}
		}

		if (test_and_clear_bit(HOST_LOG_PER_PKT_STATS,
				       &gwlan_logging.eventFlag)) {
			ret = pktlog_send_per_pkt_stats_to_user();
			if (-ENOMEM == ret) {
				msleep(200);
			}
		}

		if (test_and_clear_bit(HOST_LOG_FW_FLUSH_COMPLETE,
					&gwlan_logging.eventFlag)) {
			/* The flush bit could have been set while we were
			 * midway through the logging thread, so check the
			 * other buffers (log messages, per-packet stats)
			 * again to flush any residual data in them
			 */
			if (gwlan_logging.is_flush_complete == true) {
				gwlan_logging.is_flush_complete = false;
				send_flush_completion_to_user();
			} else {
				gwlan_logging.is_flush_complete = true;
				set_bit(HOST_LOG_DRIVER_MSG,
						&gwlan_logging.eventFlag);
				set_bit(HOST_LOG_PER_PKT_STATS,
						&gwlan_logging.eventFlag);
				set_bit(HOST_LOG_FW_FLUSH_COMPLETE,
						&gwlan_logging.eventFlag);
				wake_up_interruptible(
					&gwlan_logging.wait_queue);
			}
		}
	}

	pr_info("%s: Terminating\n", __func__);

	complete_and_exit(&gwlan_logging.shutdown_comp, 0);

	return 0;
}
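Note how the thread drains work with test_and_clear_bit(): a flag set while it is busy stays set and is simply handled on the next loop iteration, so events are deferred rather than lost. A hedged userspace sketch of that set/consume pairing with C11 atomics (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong event_flags;

static void set_event(int nr)		/* like set_bit() before a wake-up */
{
	atomic_fetch_or(&event_flags, 1UL << nr);
}

static bool consume_event(int nr)	/* like test_and_clear_bit() */
{
	unsigned long mask = 1UL << nr;

	return (atomic_fetch_and(&event_flags, ~mask) & mask) != 0;
}

int main(void)
{
	enum { HOST_LOG_DRIVER_MSG = 0 };

	set_event(HOST_LOG_DRIVER_MSG);
	if (consume_event(HOST_LOG_DRIVER_MSG))
		puts("drain driver-message buffers");
	/* a second consume sees the flag already cleared */
	printf("again: %d\n", consume_event(HOST_LOG_DRIVER_MSG));
	return 0;
}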
Example no. 19
0
int ivtv_v4l2_close(struct inode *inode, struct file *filp)
{
    struct ivtv_open_id *id = filp->private_data;
    struct ivtv *itv = id->itv;
    struct ivtv_stream *s = &itv->streams[id->type];

    IVTV_DEBUG_FILE("close %s\n", s->name);

    v4l2_prio_close(&itv->prio, &id->prio);

    /* Easy case first: this stream was never claimed by us */
    if (s->id != id->open_id) {
        kfree(id);
        return 0;
    }

    /* 'Unclaim' this stream */

    /* Stop radio */
    mutex_lock(&itv->serialize_lock);
    if (id->type == IVTV_ENC_STREAM_TYPE_RAD) {
        /* Closing radio device, return to TV mode */
        ivtv_mute(itv);
        /* Mark that the radio is no longer in use */
        clear_bit(IVTV_F_I_RADIO_USER, &itv->i_flags);
        /* Switch tuner to TV */
        ivtv_call_i2c_clients(itv, VIDIOC_S_STD, &itv->std);
        /* Select correct audio input (i.e. TV tuner or Line in) */
        ivtv_audio_set_io(itv);
        if (itv->hw_flags & IVTV_HW_SAA711X)
        {
            struct v4l2_crystal_freq crystal_freq;
            crystal_freq.freq = SAA7115_FREQ_32_11_MHZ;
            crystal_freq.flags = 0;
            ivtv_saa7115(itv, VIDIOC_INT_S_CRYSTAL_FREQ, &crystal_freq);
        }
        if (atomic_read(&itv->capturing) > 0) {
            /* Undo video mute */
            ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
                      itv->params.video_mute | (itv->params.video_mute_yuv << 8));
        }
        /* Done! Unmute and continue. */
        ivtv_unmute(itv);
        ivtv_release_stream(s);
    } else if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
        struct ivtv_stream *s_vout = &itv->streams[IVTV_DEC_STREAM_TYPE_VOUT];

        ivtv_stop_decoding(id, VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0);

        /* If all output streams are closed, and if the user doesn't have
           IVTV_DEC_STREAM_TYPE_VOUT open, then disable CC on TV-out. */
        if (itv->output_mode == OUT_NONE && !test_bit(IVTV_F_S_APPL_IO, &s_vout->s_flags)) {
            /* disable CC on TV-out */
            ivtv_disable_cc(itv);
        }
    } else {
        ivtv_stop_capture(id, 0);
    }
    kfree(id);
    mutex_unlock(&itv->serialize_lock);
    return 0;
}
Example no. 20
0
static void bluecard_write_wakeup(bluecard_info_t *info)
{
	if (!info) {
		BT_ERR("Unknown device");
		return;
	}

	if (!test_bit(XMIT_SENDING_READY, &(info->tx_state)))
		return;

	if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) {
		set_bit(XMIT_WAKEUP, &(info->tx_state));
		return;
	}

	do {
		register unsigned int iobase = info->link.io.BasePort1;
		register unsigned int offset;
		register unsigned char command;
		register unsigned long ready_bit;
		register struct sk_buff *skb;
		register int len;

		clear_bit(XMIT_WAKEUP, &(info->tx_state));

		if (!(info->link.state & DEV_PRESENT))
			return;

		if (test_bit(XMIT_BUFFER_NUMBER, &(info->tx_state))) {
			if (!test_bit(XMIT_BUF_TWO_READY, &(info->tx_state)))
				break;
			offset = 0x10;
			command = REG_COMMAND_TX_BUF_TWO;
			ready_bit = XMIT_BUF_TWO_READY;
		} else {
			if (!test_bit(XMIT_BUF_ONE_READY, &(info->tx_state)))
				break;
			offset = 0x00;
			command = REG_COMMAND_TX_BUF_ONE;
			ready_bit = XMIT_BUF_ONE_READY;
		}

		if (!(skb = skb_dequeue(&(info->txq))))
			break;

		if (skb->pkt_type & 0x80) {
			/* Disable RTS */
			info->ctrl_reg |= REG_CONTROL_RTS;
			outb(info->ctrl_reg, iobase + REG_CONTROL);
		}

		/* Activate LED */
		bluecard_enable_activity_led(info);

		/* Send frame */
		len = bluecard_write(iobase, offset, skb->data, skb->len);

		/* Tell the FPGA to send the data */
		outb_p(command, iobase + REG_COMMAND);

		/* Mark the buffer as dirty */
		clear_bit(ready_bit, &(info->tx_state));

		if (skb->pkt_type & 0x80) {
			DECLARE_WAIT_QUEUE_HEAD(wq);
			DEFINE_WAIT(wait);

			unsigned char baud_reg;

			switch (skb->pkt_type) {
			case PKT_BAUD_RATE_460800:
				baud_reg = REG_CONTROL_BAUD_RATE_460800;
				break;
			case PKT_BAUD_RATE_230400:
				baud_reg = REG_CONTROL_BAUD_RATE_230400;
				break;
			case PKT_BAUD_RATE_115200:
				baud_reg = REG_CONTROL_BAUD_RATE_115200;
				break;
			case PKT_BAUD_RATE_57600:
				/* Fall through... */
			default:
				baud_reg = REG_CONTROL_BAUD_RATE_57600;
				break;
			}

			/* Wait until the command reaches the baseband */
			prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
			schedule_timeout(HZ/10);
			finish_wait(&wq, &wait);

			/* Set baud on baseband */
			info->ctrl_reg &= ~0x03;
			info->ctrl_reg |= baud_reg;
			outb(info->ctrl_reg, iobase + REG_CONTROL);

			/* Enable RTS */
			info->ctrl_reg &= ~REG_CONTROL_RTS;
			outb(info->ctrl_reg, iobase + REG_CONTROL);

			/* Wait before the next HCI packet can be sent */
			prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
			schedule_timeout(HZ);
			finish_wait(&wq, &wait);
		}

		if (len == skb->len) {
			kfree_skb(skb);
		} else {
			skb_pull(skb, len);
			skb_queue_head(&(info->txq), skb);
		}

		info->hdev->stat.byte_tx += len;

		/* Change buffer */
		change_bit(XMIT_BUFFER_NUMBER, &(info->tx_state));

	} while (test_bit(XMIT_WAKEUP, &(info->tx_state)));

	clear_bit(XMIT_SENDING, &(info->tx_state));
}
Example no. 21
0
static inline bool page_poison(struct page *page)
{
	return test_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
}
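page_poison() above is a one-line test_bit() wrapper. For readers unfamiliar with the underlying layout, here is a plain-C sketch of a non-atomic test_bit over an unsigned long array, mirroring the word-index/bit-offset math the kernel uses (the flag value below is illustrative):

#include <limits.h>	/* CHAR_BIT */
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static int test_bit_u(unsigned int nr, const unsigned long *addr)
{
	/* pick the word holding bit nr, then shift its bit to position 0 */
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
}

int main(void)
{
	unsigned long debug_flags[2] = { 0 };
	unsigned int PAGE_DEBUG_FLAG_POISON = 0;	/* value is illustrative */

	debug_flags[0] |= 1UL << PAGE_DEBUG_FLAG_POISON;	/* like set_bit() */
	printf("poisoned: %d\n", test_bit_u(PAGE_DEBUG_FLAG_POISON, debug_flags));
	return 0;
}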
Example no. 22
0
static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
{
	unsigned int iobase;
	unsigned char buf[31];
	int i, len;

	if (!info) {
		BT_ERR("Unknown device");
		return;
	}

	iobase = info->link.io.BasePort1;

	if (test_bit(XMIT_SENDING_READY, &(info->tx_state)))
		bluecard_enable_activity_led(info);

	len = bluecard_read(iobase, offset, buf, sizeof(buf));

	for (i = 0; i < len; i++) {

		/* Allocate packet */
		if (info->rx_skb == NULL) {
			info->rx_state = RECV_WAIT_PACKET_TYPE;
			info->rx_count = 0;
			if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
				BT_ERR("Can't allocate mem for new packet");
				return;
			}
		}

		if (info->rx_state == RECV_WAIT_PACKET_TYPE) {

			info->rx_skb->dev = (void *) info->hdev;
			info->rx_skb->pkt_type = buf[i];

			switch (info->rx_skb->pkt_type) {

			case 0x00:
				/* init packet */
				if (offset != 0x00) {
					set_bit(XMIT_BUF_ONE_READY, &(info->tx_state));
					set_bit(XMIT_BUF_TWO_READY, &(info->tx_state));
					set_bit(XMIT_SENDING_READY, &(info->tx_state));
					bluecard_write_wakeup(info);
				}

				kfree_skb(info->rx_skb);
				info->rx_skb = NULL;
				break;

			case HCI_EVENT_PKT:
				info->rx_state = RECV_WAIT_EVENT_HEADER;
				info->rx_count = HCI_EVENT_HDR_SIZE;
				break;

			case HCI_ACLDATA_PKT:
				info->rx_state = RECV_WAIT_ACL_HEADER;
				info->rx_count = HCI_ACL_HDR_SIZE;
				break;

			case HCI_SCODATA_PKT:
				info->rx_state = RECV_WAIT_SCO_HEADER;
				info->rx_count = HCI_SCO_HDR_SIZE;
				break;

			default:
				/* unknown packet */
				BT_ERR("Unknown HCI packet with type 0x%02x received", info->rx_skb->pkt_type);
				info->hdev->stat.err_rx++;

				kfree_skb(info->rx_skb);
				info->rx_skb = NULL;
				break;

			}

		} else {

			*skb_put(info->rx_skb, 1) = buf[i];
			info->rx_count--;

			if (info->rx_count == 0) {

				int dlen;
				struct hci_event_hdr *eh;
				struct hci_acl_hdr *ah;
				struct hci_sco_hdr *sh;

				switch (info->rx_state) {

				case RECV_WAIT_EVENT_HEADER:
					eh = (struct hci_event_hdr *)(info->rx_skb->data);
					info->rx_state = RECV_WAIT_DATA;
					info->rx_count = eh->plen;
					break;

				case RECV_WAIT_ACL_HEADER:
					ah = (struct hci_acl_hdr *)(info->rx_skb->data);
					dlen = __le16_to_cpu(ah->dlen);
					info->rx_state = RECV_WAIT_DATA;
					info->rx_count = dlen;
					break;

				case RECV_WAIT_SCO_HEADER:
					sh = (struct hci_sco_hdr *)(info->rx_skb->data);
					info->rx_state = RECV_WAIT_DATA;
					info->rx_count = sh->dlen;
					break;

				case RECV_WAIT_DATA:
					hci_recv_frame(info->rx_skb);
					info->rx_skb = NULL;
					break;

				}

			}

		}


	}

	info->hdev->stat.byte_rx += len;
}
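bluecard_receive() above is a byte-at-a-time framing state machine: the first byte selects the packet type, the header announces the payload length, and rx_count ticks down to zero before the frame is delivered. A compact userspace sketch of that shape (the one-byte header and sample stream are illustrative, not HCI framing):

#include <stdio.h>

enum rx_state { WAIT_TYPE, WAIT_HEADER, WAIT_DATA };

static enum rx_state state = WAIT_TYPE;
static int rx_count;

static void rx_byte(unsigned char b)
{
	switch (state) {
	case WAIT_TYPE:
		/* in the driver: buf[i] selects EVENT/ACL/SCO and a header size */
		state = WAIT_HEADER;
		rx_count = 1;		/* pretend headers are one byte */
		break;
	case WAIT_HEADER:
		if (--rx_count == 0) {
			rx_count = b;	/* header byte carries payload length */
			state = rx_count ? WAIT_DATA : WAIT_TYPE;
		}
		break;
	case WAIT_DATA:
		if (--rx_count == 0) {
			puts("frame complete");	/* like hci_recv_frame() */
			state = WAIT_TYPE;
		}
		break;
	}
}

int main(void)
{
	unsigned char stream[] = { 0x04, 2, 0xaa, 0xbb };	/* type, len, payload */

	for (unsigned int i = 0; i < sizeof(stream); i++)
		rx_byte(stream[i]);
	return 0;
}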
Example no. 23
0
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
			    struct rdma_hw_stats *stats,
			    u8 port, int index)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct ctx_hw_stats *bnxt_re_stats = rdev->qplib_ctx.stats.dma;
	int rc  = 0;

	if (!port || !stats)
		return -EINVAL;

	stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&rdev->qp_count);
	stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&rdev->srq_count);
	stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&rdev->cq_count);
	stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&rdev->mr_count);
	stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&rdev->mw_count);
	if (bnxt_re_stats) {
		stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
			le64_to_cpu(bnxt_re_stats->tx_bcast_pkts);
		stats->value[BNXT_RE_RX_DROPS] =
			le64_to_cpu(bnxt_re_stats->rx_drop_pkts);
		stats->value[BNXT_RE_RX_DISCARDS] =
			le64_to_cpu(bnxt_re_stats->rx_discard_pkts);
		stats->value[BNXT_RE_RX_PKTS] =
			le64_to_cpu(bnxt_re_stats->rx_ucast_pkts);
		stats->value[BNXT_RE_RX_BYTES] =
			le64_to_cpu(bnxt_re_stats->rx_ucast_bytes);
		stats->value[BNXT_RE_TX_PKTS] =
			le64_to_cpu(bnxt_re_stats->tx_ucast_pkts);
		stats->value[BNXT_RE_TX_BYTES] =
			le64_to_cpu(bnxt_re_stats->tx_ucast_bytes);
	}
	if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) {
		rc = bnxt_qplib_get_roce_stats(&rdev->rcfw, &rdev->stats);
		if (rc)
			clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
				  &rdev->flags);
		stats->value[BNXT_RE_TO_RETRANSMITS] =
					rdev->stats.to_retransmits;
		stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] =
					rdev->stats.seq_err_naks_rcvd;
		stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] =
					rdev->stats.max_retry_exceeded;
		stats->value[BNXT_RE_RNR_NAKS_RCVD] =
					rdev->stats.rnr_naks_rcvd;
		stats->value[BNXT_RE_MISSING_RESP] =
					rdev->stats.missing_resp;
		stats->value[BNXT_RE_UNRECOVERABLE_ERR] =
					rdev->stats.unrecoverable_err;
		stats->value[BNXT_RE_BAD_RESP_ERR] =
					rdev->stats.bad_resp_err;
		stats->value[BNXT_RE_LOCAL_QP_OP_ERR]	=
				rdev->stats.local_qp_op_err;
		stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] =
				rdev->stats.local_protection_err;
		stats->value[BNXT_RE_MEM_MGMT_OP_ERR] =
				rdev->stats.mem_mgmt_op_err;
		stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] =
				rdev->stats.remote_invalid_req_err;
		stats->value[BNXT_RE_REMOTE_ACCESS_ERR] =
				rdev->stats.remote_access_err;
		stats->value[BNXT_RE_REMOTE_OP_ERR] =
				rdev->stats.remote_op_err;
		stats->value[BNXT_RE_DUP_REQ] =
				rdev->stats.dup_req;
		stats->value[BNXT_RE_RES_EXCEED_MAX] =
				rdev->stats.res_exceed_max;
		stats->value[BNXT_RE_RES_LENGTH_MISMATCH] =
				rdev->stats.res_length_mismatch;
		stats->value[BNXT_RE_RES_EXCEEDS_WQE] =
				rdev->stats.res_exceeds_wqe;
		stats->value[BNXT_RE_RES_OPCODE_ERR] =
				rdev->stats.res_opcode_err;
		stats->value[BNXT_RE_RES_RX_INVALID_RKEY] =
				rdev->stats.res_rx_invalid_rkey;
		stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] =
				rdev->stats.res_rx_domain_err;
		stats->value[BNXT_RE_RES_RX_NO_PERM] =
				rdev->stats.res_rx_no_perm;
		stats->value[BNXT_RE_RES_RX_RANGE_ERR]  =
				rdev->stats.res_rx_range_err;
		stats->value[BNXT_RE_RES_TX_INVALID_RKEY] =
				rdev->stats.res_tx_invalid_rkey;
		stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] =
				rdev->stats.res_tx_domain_err;
		stats->value[BNXT_RE_RES_TX_NO_PERM] =
				rdev->stats.res_tx_no_perm;
		stats->value[BNXT_RE_RES_TX_RANGE_ERR]  =
				rdev->stats.res_tx_range_err;
		stats->value[BNXT_RE_RES_IRRQ_OFLOW] =
				rdev->stats.res_irrq_oflow;
		stats->value[BNXT_RE_RES_UNSUP_OPCODE]  =
				rdev->stats.res_unsup_opcode;
		stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] =
				rdev->stats.res_unaligned_atomic;
		stats->value[BNXT_RE_RES_REM_INV_ERR]   =
				rdev->stats.res_rem_inv_err;
		stats->value[BNXT_RE_RES_MEM_ERROR] =
				rdev->stats.res_mem_error;
		stats->value[BNXT_RE_RES_SRQ_ERR] =
				rdev->stats.res_srq_err;
		stats->value[BNXT_RE_RES_CMP_ERR] =
				rdev->stats.res_cmp_err;
		stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] =
				rdev->stats.res_invalid_dup_rkey;
		stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] =
				rdev->stats.res_wqe_format_err;
		stats->value[BNXT_RE_RES_CQ_LOAD_ERR]   =
				rdev->stats.res_cq_load_err;
		stats->value[BNXT_RE_RES_SRQ_LOAD_ERR]  =
				rdev->stats.res_srq_load_err;
		stats->value[BNXT_RE_RES_TX_PCI_ERR]    =
				rdev->stats.res_tx_pci_err;
		stats->value[BNXT_RE_RES_RX_PCI_ERR]    =
				rdev->stats.res_rx_pci_err;
		stats->value[BNXT_RE_OUT_OF_SEQ_ERR]    =
				rdev->stats.res_oos_drop_count;
	}

	return ARRAY_SIZE(bnxt_re_stat_name);
}
Example no. 24
0
static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
				       struct sk_buff *skb,
				       const struct net_device *in,
				       const struct net_device *out,
				       int (*okfn)(struct sk_buff *))
{
	struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out));
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	struct nf_conn_synproxy *synproxy;
	struct synproxy_options opts = {};
	const struct ip_ct_tcp *state;
	struct tcphdr *th, _th;
	__be16 frag_off;
	u8 nexthdr;
	int thoff;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return NF_ACCEPT;

	synproxy = nfct_synproxy(ct);
	if (synproxy == NULL)
		return NF_ACCEPT;

	if (nf_is_loopback_packet(skb))
		return NF_ACCEPT;

	nexthdr = ipv6_hdr(skb)->nexthdr;
	thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
				 &frag_off);
	if (thoff < 0)
		return NF_ACCEPT;

	th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
	if (th == NULL)
		return NF_DROP;

	state = &ct->proto.tcp;
	switch (state->state) {
	case TCP_CONNTRACK_CLOSE:
		if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
						      ntohl(th->seq) + 1);
			break;
		}

		if (!th->syn || th->ack ||
		    CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
			break;

		/* Reopened connection - reset the sequence number and timestamp
		 * adjustments, they will get initialized once the connection is
		 * reestablished.
		 */
		nf_ct_seqadj_init(ct, ctinfo, 0);
		synproxy->tsoff = 0;
		this_cpu_inc(snet->stats->conn_reopened);

		/* fall through */
	case TCP_CONNTRACK_SYN_SENT:
		synproxy_parse_options(skb, thoff, th, &opts);

		if (!th->syn && th->ack &&
		    CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
			/* Keep-Alives are sent with SEG.SEQ = SND.NXT-1,
			 * therefore we need to add 1 to make the SYN sequence
			 * number match the one of the first SYN.
			 */
			if (synproxy_recv_client_ack(snet, skb, th, &opts,
						     ntohl(th->seq) + 1))
				this_cpu_inc(snet->stats->cookie_retrans);

			return NF_DROP;
		}

		synproxy->isn = ntohl(th->ack_seq);
		if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
			synproxy->its = opts.tsecr;
		break;
	case TCP_CONNTRACK_SYN_RECV:
		if (!th->syn || !th->ack)
			break;

		synproxy_parse_options(skb, thoff, th, &opts);
		if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
			synproxy->tsoff = opts.tsval - synproxy->its;

		opts.options &= ~(XT_SYNPROXY_OPT_MSS |
				  XT_SYNPROXY_OPT_WSCALE |
				  XT_SYNPROXY_OPT_SACK_PERM);

		swap(opts.tsval, opts.tsecr);
		synproxy_send_server_ack(snet, state, skb, th, &opts);

		nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));

		swap(opts.tsval, opts.tsecr);
		synproxy_send_client_ack(snet, skb, th, &opts);

		consume_skb(skb);
		return NF_STOLEN;
	default:
		break;
	}

	synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy);
	return NF_ACCEPT;
}
Example no. 25
0
static int
efw_probe(struct fw_unit *unit,
	  const struct ieee1394_device_id *entry)
{
	struct snd_card *card;
	struct snd_efw *efw;
	int card_index, err;

	mutex_lock(&devices_mutex);

	/* check registered cards */
	for (card_index = 0; card_index < SNDRV_CARDS; ++card_index) {
		if (!test_bit(card_index, devices_used) && enable[card_index])
			break;
	}
	if (card_index >= SNDRV_CARDS) {
		err = -ENOENT;
		goto end;
	}

	err = snd_card_new(&unit->device, index[card_index], id[card_index],
			   THIS_MODULE, sizeof(struct snd_efw), &card);
	if (err < 0)
		goto end;
	efw = card->private_data;
	efw->card_index = card_index;
	set_bit(card_index, devices_used);
	card->private_free = efw_card_free;

	efw->card = card;
	efw->unit = fw_unit_get(unit);
	mutex_init(&efw->mutex);
	spin_lock_init(&efw->lock);
	init_waitqueue_head(&efw->hwdep_wait);

	/* prepare response buffer */
	snd_efw_resp_buf_size = clamp(snd_efw_resp_buf_size,
				      SND_EFW_RESPONSE_MAXIMUM_BYTES, 4096U);
	efw->resp_buf = kzalloc(snd_efw_resp_buf_size, GFP_KERNEL);
	if (efw->resp_buf == NULL) {
		err = -ENOMEM;
		goto error;
	}
	efw->pull_ptr = efw->push_ptr = efw->resp_buf;
	snd_efw_transaction_add_instance(efw);

	err = get_hardware_info(efw);
	if (err < 0)
		goto error;
	/* AudioFire8 (since 2009) and AudioFirePre8 */
	if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
		efw->is_af9 = true;
	/* These models use the same firmware. */
	if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
	    entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
	    entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
	    entry->model_id == MODEL_GIBSON_RIP ||
	    entry->model_id == MODEL_GIBSON_GOLDTOP)
		efw->is_fireworks3 = true;

	snd_efw_proc_init(efw);

	if (efw->midi_out_ports || efw->midi_in_ports) {
		err = snd_efw_create_midi_devices(efw);
		if (err < 0)
			goto error;
	}

	err = snd_efw_create_pcm_devices(efw);
	if (err < 0)
		goto error;

	err = snd_efw_create_hwdep_device(efw);
	if (err < 0)
		goto error;

	err = snd_efw_stream_init_duplex(efw);
	if (err < 0)
		goto error;

	err = snd_card_register(card);
	if (err < 0) {
		snd_efw_stream_destroy_duplex(efw);
		goto error;
	}

	dev_set_drvdata(&unit->device, efw);
end:
	mutex_unlock(&devices_mutex);
	return err;
error:
	snd_efw_transaction_remove_instance(efw);
	mutex_unlock(&devices_mutex);
	snd_card_free(card);
	return err;
}
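The card_index scan in efw_probe() is effectively a small slot allocator: find the first index whose devices_used bit is clear and whose enable[] entry is set, then claim it with set_bit() while devices_mutex is held. A userspace sketch of the same scan (array size and names are shrunk for illustration):

#include <stdbool.h>
#include <stdio.h>

#define NCARDS 8

static unsigned long devices_used;		/* one bit per slot */
static bool enable[NCARDS] = { true, true, true };

static int alloc_card_index(void)
{
	int i;

	for (i = 0; i < NCARDS; i++) {
		if (!((devices_used >> i) & 1UL) && enable[i]) {
			devices_used |= 1UL << i;	/* like set_bit() */
			return i;
		}
	}
	return -1;				/* like the -ENOENT path */
}

int main(void)
{
	int a = alloc_card_index();
	int b = alloc_card_index();

	printf("first: %d, second: %d\n", a, b);
	return 0;
}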
Example no. 26
0
File: wcmd.c Project: 020gzh/linux
void vnt_run_command(struct work_struct *work)
{
	struct vnt_private *priv =
		container_of(work, struct vnt_private, run_command_work.work);

	if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
		return;

	if (priv->cmd_running != true)
		return;

	switch (priv->command_state) {
	case WLAN_CMD_INIT_MAC80211_START:
		if (priv->mac_hw)
			break;

		dev_info(&priv->usb->dev, "Starting mac80211\n");

		if (vnt_init(priv)) {
			/* If fail all ends TODO retry */
			dev_err(&priv->usb->dev, "failed to start\n");
			ieee80211_free_hw(priv->hw);
			return;
		}

		break;

	case WLAN_CMD_TBTT_WAKEUP_START:
		vnt_next_tbtt_wakeup(priv);
		break;

	case WLAN_CMD_BECON_SEND_START:
		if (!priv->vif)
			break;

		vnt_beacon_make(priv, priv->vif);

		vnt_mac_reg_bits_on(priv, MAC_REG_TCR, TCR_AUTOBCNTX);

		break;

	case WLAN_CMD_SETPOWER_START:

		vnt_rf_setpower(priv, priv->current_rate,
				priv->hw->conf.chandef.chan->hw_value);

		break;

	case WLAN_CMD_CHANGE_ANTENNA_START:
		dev_dbg(&priv->usb->dev, "Change from Antenna%d to",
							priv->rx_antenna_sel);

		if (priv->rx_antenna_sel == 0) {
			priv->rx_antenna_sel = 1;
			if (priv->tx_rx_ant_inv == true)
				vnt_set_antenna_mode(priv, ANT_RXA);
			else
				vnt_set_antenna_mode(priv, ANT_RXB);
		} else {
			priv->rx_antenna_sel = 0;
			if (priv->tx_rx_ant_inv == true)
				vnt_set_antenna_mode(priv, ANT_RXB);
			else
				vnt_set_antenna_mode(priv, ANT_RXA);
		}
		break;

	default:
		break;
	}

	vnt_cmd_complete(priv);
}
Example no. 27
0
/**
 * n_hdlc_tty_read - Called to retrieve one frame of data (if available)
 * @tty - pointer to tty instance data
 * @file - pointer to open file object
 * @buf - pointer to returned data buffer
 * @nr - size of returned data buffer
 * 	
 * Returns the number of bytes returned or error code.
 */
static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
			   __u8 __user *buf, size_t nr)
{
	struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
	int ret = 0;
	struct n_hdlc_buf *rbuf;
	DECLARE_WAITQUEUE(wait, current);

	if (debuglevel >= DEBUG_LEVEL_INFO)	
		printk("%s(%d)n_hdlc_tty_read() called\n",__FILE__,__LINE__);
		
	/* Validate the pointers */
	if (!n_hdlc)
		return -EIO;

	/* verify user access to buffer */
	if (!access_ok(VERIFY_WRITE, buf, nr)) {
		printk(KERN_WARNING "%s(%d) n_hdlc_tty_read() can't verify user "
		"buffer\n", __FILE__, __LINE__);
		return -EFAULT;
	}

	add_wait_queue(&tty->read_wait, &wait);

	for (;;) {
		if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
			ret = -EIO;
			break;
		}
		if (tty_hung_up_p(file))
			break;

		set_current_state(TASK_INTERRUPTIBLE);

		rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
		if (rbuf) {
			if (rbuf->count > nr) {
				/* too large for caller's buffer */
				ret = -EOVERFLOW;
			} else {
				if (copy_to_user(buf, rbuf->buf, rbuf->count))
					ret = -EFAULT;
				else
					ret = rbuf->count;
			}

			if (n_hdlc->rx_free_buf_list.count >
			    DEFAULT_RX_BUF_COUNT)
				kfree(rbuf);
			else
				n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
			break;
		}
			
		/* no data */
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		schedule();

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}

	remove_wait_queue(&tty->read_wait, &wait);
	__set_current_state(TASK_RUNNING);

	return ret;
	
}	/* end of n_hdlc_tty_read() */
Example no. 28
0
File: w6692.c Project: 020gzh/linux
static void
W6692B_interrupt(struct IsdnCardState *cs, u_char bchan)
{
	u_char val;
	u_char r;
	struct BCState *bcs;
	struct sk_buff *skb;
	int count;

	bcs = (cs->bcs->channel == bchan) ? cs->bcs : (cs->bcs + 1);
	val = cs->BC_Read_Reg(cs, bchan, W_B_EXIR);
	debugl1(cs, "W6692B chan %d B_EXIR 0x%02X", bchan, val);

	if (!test_bit(BC_FLG_INIT, &bcs->Flag)) {
		debugl1(cs, "W6692B not INIT yet");
		return;
	}
	if (val & W_B_EXI_RME) {	/* RME */
		r = cs->BC_Read_Reg(cs, bchan, W_B_STAR);
		if (r & (W_B_STAR_RDOV | W_B_STAR_CRCE | W_B_STAR_RMB)) {
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "W6692 B STAR %x", r);
			if ((r & W_B_STAR_RDOV) && bcs->mode)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "W6692 B RDOV mode=%d",
						bcs->mode);
			if (r & W_B_STAR_CRCE)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "W6692 B CRC error");
			cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RRST | W_B_CMDR_RACT);
		} else {
			count = cs->BC_Read_Reg(cs, bchan, W_B_RBCL) & (W_B_FIFO_THRESH - 1);
			if (count == 0)
				count = W_B_FIFO_THRESH;
			W6692B_empty_fifo(bcs, count);
			if ((count = bcs->hw.w6692.rcvidx) > 0) {
				if (cs->debug & L1_DEB_HSCX_FIFO)
					debugl1(cs, "W6692 Bchan Frame %d", count);
				if (!(skb = dev_alloc_skb(count)))
					printk(KERN_WARNING "W6692: Bchan receive out of memory\n");
				else {
					memcpy(skb_put(skb, count), bcs->hw.w6692.rcvbuf, count);
					skb_queue_tail(&bcs->rqueue, skb);
				}
			}
		}
		bcs->hw.w6692.rcvidx = 0;
		schedule_event(bcs, B_RCVBUFREADY);
	}
	if (val & W_B_EXI_RMR) {	/* RMR */
		W6692B_empty_fifo(bcs, W_B_FIFO_THRESH);
		r = cs->BC_Read_Reg(cs, bchan, W_B_STAR);
		if (r & W_B_STAR_RDOV) {
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "W6692 B RDOV(RMR) mode=%d", bcs->mode);
			cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RRST | W_B_CMDR_RACT);
			if (bcs->mode != L1_MODE_TRANS)
				bcs->hw.w6692.rcvidx = 0;
		}
		if (bcs->mode == L1_MODE_TRANS) {
			/* receive audio data */
			if (!(skb = dev_alloc_skb(W_B_FIFO_THRESH)))
				printk(KERN_WARNING "HiSax: receive out of memory\n");
			else {
				memcpy(skb_put(skb, W_B_FIFO_THRESH), bcs->hw.w6692.rcvbuf, W_B_FIFO_THRESH);
				skb_queue_tail(&bcs->rqueue, skb);
			}
			bcs->hw.w6692.rcvidx = 0;
			schedule_event(bcs, B_RCVBUFREADY);
		}
	}
	if (val & W_B_EXI_XDUN) {	/* XDUN */
		cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT);
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "W6692 B EXIR %x Lost TX", val);
		if (bcs->mode == 1)
			W6692B_fill_fifo(bcs);
		else {
			/* Here we lost a TX interrupt, so we
			 * restart transmitting the whole frame.
			 */
			if (bcs->tx_skb) {
				skb_push(bcs->tx_skb, bcs->hw.w6692.count);
				bcs->tx_cnt += bcs->hw.w6692.count;
				bcs->hw.w6692.count = 0;
			}
		}
		return;
	}
	if (val & W_B_EXI_XFR) {	/* XFR */
		r = cs->BC_Read_Reg(cs, bchan, W_B_STAR);
		if (r & W_B_STAR_XDOW) {
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "W6692 B STAR %x XDOW", r);
			cs->BC_Write_Reg(cs, bchan, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT);
			if (bcs->tx_skb && (bcs->mode != 1)) {
				skb_push(bcs->tx_skb, bcs->hw.w6692.count);
				bcs->tx_cnt += bcs->hw.w6692.count;
				bcs->hw.w6692.count = 0;
			}
		}
		if (bcs->tx_skb) {
			if (bcs->tx_skb->len) {
				W6692B_fill_fifo(bcs);
				return;
			} else {
				if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
				    (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
					u_long flags;
					spin_lock_irqsave(&bcs->aclock, flags);
					bcs->ackcnt += bcs->hw.w6692.count;
					spin_unlock_irqrestore(&bcs->aclock, flags);
					schedule_event(bcs, B_ACKPENDING);
				}
				dev_kfree_skb_irq(bcs->tx_skb);
				bcs->hw.w6692.count = 0;
				bcs->tx_skb = NULL;
			}
		}
		if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
			bcs->hw.w6692.count = 0;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			W6692B_fill_fifo(bcs);
		} else {
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
			schedule_event(bcs, B_XMTBUFREADY);
		}
	}
}
Example no. 29
0
int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
{
	struct ivtv *itv = s->itv;
	DECLARE_WAITQUEUE(wait, current);
	int cap_type;
	int stopmode;

	if (s->vdev == NULL)
		return -EINVAL;

	IVTV_DEBUG_INFO("Stop Capture\n");

	if (s->type == IVTV_DEC_STREAM_TYPE_VOUT)
		return 0;
	if (atomic_read(&itv->capturing) == 0)
		return 0;

	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_YUV:
		cap_type = 1;
		break;
	case IVTV_ENC_STREAM_TYPE_PCM:
		cap_type = 1;
		break;
	case IVTV_ENC_STREAM_TYPE_VBI:
		cap_type = 1;
		break;
	case IVTV_ENC_STREAM_TYPE_MPG:
	default:
		cap_type = 0;
		break;
	}

	/* Stop Capture Mode: for an MPG stream with gop_end set, ask the
	 * firmware to stop at the end of the GOP; otherwise stop now.
	 */
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) {
		stopmode = 0;
	} else {
		stopmode = 1;
	}

	/* end the capture (stopmode: 0 = end of GOP, 1 = immediately) */
	ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, stopmode, cap_type, s->subtype);

	if (!test_bit(IVTV_F_S_PASSTHROUGH, &s->s_flags)) {
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) {
			/* when stopping at a GOP boundary, wait for the
			 * encoder's end-of-stream interrupt
			 */
			unsigned long duration;
			unsigned long then = jiffies;

			add_wait_queue(&itv->eos_waitq, &wait);

			set_current_state(TASK_INTERRUPTIBLE);

			/* poll the EOS flag for at most 2 seconds */
			while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
				time_before(jiffies,
					    then + msecs_to_jiffies(2000))) {
				schedule_timeout(msecs_to_jiffies(10));
			}

			duration = ((1000 + HZ / 2) / HZ) * (jiffies - then);

			if (!test_bit(IVTV_F_I_EOS, &itv->i_flags)) {
				IVTV_DEBUG_WARN("%s: EOS interrupt not received! stopping anyway.\n", s->name);
				IVTV_DEBUG_WARN("%s: waited %lu ms.\n", s->name, duration);
			} else {
				IVTV_DEBUG_INFO("%s: EOS took %lu ms to occur.\n", s->name, duration);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&itv->eos_waitq, &wait);
			set_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
		}

		/* allow 100 ms for any pending interrupts to be handled */
		ivtv_msleep_timeout(100, 0);
	}

	atomic_dec(&itv->capturing);

	/* this stream is no longer capturing */
	clear_bit(IVTV_F_S_STREAMING, &s->s_flags);

	if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
		ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP);

	if (atomic_read(&itv->capturing) > 0) {
		return 0;
	}

	cx2341x_handler_set_busy(&itv->cxhdl, 0);

	/* last capture stopped: mask all capture interrupts, stop the DMA timer */
	ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
	del_timer(&itv->dma_timer);

	/* if digitizer-reset event notification was enabled, turn it off */
	if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
		/* event 0, disabled, VIM reset interrupt, no mailbox */
		ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1);
		ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
	}

	ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, 1, 2, 7);

	wake_up(&s->waitq);

	return 0;
}
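The EOS wait above bounds its polling with a jiffies deadline, then + msecs_to_jiffies(2000). A userspace sketch of the same bounded-poll shape against CLOCK_MONOTONIC, with schedule_timeout() replaced by nanosleep() (assumed names, not driver code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static atomic_bool eos_flag;

static bool wait_for_eos(long timeout_ms)
{
	struct timespec now, deadline, tick = { 0, 10 * 1000 * 1000 }; /* 10 ms */

	/* compute the deadline, like "then + msecs_to_jiffies(timeout)" */
	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += timeout_ms / 1000;
	deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	for (;;) {
		if (atomic_load(&eos_flag))
			return true;		/* EOS seen */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > deadline.tv_sec ||
		    (now.tv_sec == deadline.tv_sec &&
		     now.tv_nsec >= deadline.tv_nsec))
			return false;		/* timed out, stop anyway */
		nanosleep(&tick, NULL);		/* like schedule_timeout(10 ms) */
	}
}

int main(void)
{
	printf("eos: %d\n", wait_for_eos(100));
	return 0;
}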
Example no. 30
0
/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl || !sgc)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid > netdev->fcoe_ddp_xid) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
			xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);


	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from per cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed allocated ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset of length of current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have none-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}