Example #1
static int
hfcsusb_setup_bch(struct bchannel *bch, int protocol)
{
	struct hfcsusb *hw = bch->hw;
	__u8 conhdlc, sctrl, sctrl_r;

	if (debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: %s: protocol %x-->%x B%d\n",
		    hw->name, __func__, bch->state, protocol,
		    bch->nr);

	
	/* set up value for CON_HDLC */
	conhdlc = 0;
	if (protocol > ISDN_P_NONE)
		conhdlc = 8;	/* enable FIFO */

	switch (protocol) {
	case (-1):	/* used for init */
		bch->state = -1;
		/* fall through */
	case (ISDN_P_NONE):
		if (bch->state == ISDN_P_NONE)
			return 0; /* already in idle state */
		bch->state = ISDN_P_NONE;
		clear_bit(FLG_HDLC, &bch->Flags);
		clear_bit(FLG_TRANSPARENT, &bch->Flags);
		break;
	case (ISDN_P_B_RAW):
		conhdlc |= 2;
		bch->state = protocol;
		set_bit(FLG_TRANSPARENT, &bch->Flags);
		break;
	case (ISDN_P_B_HDLC):
		bch->state = protocol;
		set_bit(FLG_HDLC, &bch->Flags);
		break;
	default:
		if (debug & DEBUG_HW)
			printk(KERN_DEBUG "%s: %s: prot not known %x\n",
				hw->name, __func__, protocol);
		return -ENOPROTOOPT;
	}

	if (protocol >= ISDN_P_NONE) {
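		/* select and reset both FIFOs of this B-channel, then
		 * reprogram the S0 interface control registers */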
		write_reg(hw, HFCUSB_FIFO, (bch->nr == 1) ? 0 : 2);
		write_reg(hw, HFCUSB_CON_HDLC, conhdlc);
		write_reg(hw, HFCUSB_INC_RES_F, 2);
		write_reg(hw, HFCUSB_FIFO, (bch->nr == 1) ? 1 : 3);
		write_reg(hw, HFCUSB_CON_HDLC, conhdlc);
		write_reg(hw, HFCUSB_INC_RES_F, 2);

		sctrl = 0x40 + ((hw->protocol == ISDN_P_TE_S0) ? 0x00 : 0x04);
		sctrl_r = 0x0;
		if (test_bit(FLG_ACTIVE, &hw->bch[0].Flags)) {
			sctrl |= 1;
			sctrl_r |= 1;
		}
		if (test_bit(FLG_ACTIVE, &hw->bch[1].Flags)) {
			sctrl |= 2;
			sctrl_r |= 2;
		}
		write_reg(hw, HFCUSB_SCTRL, sctrl);
		write_reg(hw, HFCUSB_SCTRL_R, sctrl_r);

		if (protocol > ISDN_P_NONE)
			handle_led(hw, (bch->nr == 1) ? LED_B1_ON : LED_B2_ON);
		else
			handle_led(hw, (bch->nr == 1) ? LED_B1_OFF :
				LED_B2_OFF);
	}
	hfcsusb_ph_info(hw);
	return 0;
}
Example #2
static int __devinit optjoy_spi_probe(struct platform_device *pdev)
{
	struct optjoy_drv_data *optjoy_data;
	int ret = 0;

	gprintk("start.\n");

	optjoy_gpio_init();  

	optjoy_workqueue = create_singlethread_workqueue("optjoy_workqueue");  
	if (optjoy_workqueue == NULL){
		printk(KERN_ERR "[optjoy_spi_probe] create_singlethread_workqueue failed.\n");
		ret = -ENOMEM;
		goto err_create_workqueue_failed;
	}

	/* alloc driver data */
	optjoy_data = kzalloc(sizeof(struct optjoy_drv_data), GFP_KERNEL);
	if (!optjoy_data) {
		printk(KERN_ERR "[optjoy_spi_probe] kzalloc error\n");
		ret = -ENOMEM;
		goto err_alloc_data_failed;
	}
  	
	optjoy_data->input_dev = input_allocate_device();
	if (optjoy_data->input_dev == NULL) {
		printk(KERN_ERR "[optjoy_spi_probe] Failed to allocate input device\n");
		ret = -ENOMEM;
		goto err_input_dev_alloc_failed;
	}


	/* workqueue initialize */
	INIT_WORK(&optjoy_data->work, optjoy_spi_work_func);

	optjoy_hw_init();

	optjoy_data->input_dev->name = "optjoy_device";
	//optjoy_data->input_dev->phys = "optjoy_device/input2";

	set_bit(EV_KEY, optjoy_data->input_dev->evbit);

	set_bit(SEC_KEY_LEFT, optjoy_data->input_dev->keybit);
	set_bit(SEC_KEY_RIGHT, optjoy_data->input_dev->keybit);
	set_bit(SEC_KEY_UP, optjoy_data->input_dev->keybit);	
	set_bit(SEC_KEY_DOWN, optjoy_data->input_dev->keybit);

	optjoy_data->input_dev->keycode = optjoy_keycode;

	ret = input_register_device(optjoy_data->input_dev);
	if (ret) {
		printk(KERN_ERR "[optjoy_spi_probe] Unable to register %s input device\n", optjoy_data->input_dev->name);
		goto err_input_register_device_failed;
	}

#if 0  //TEMP
	/* IRQ setting */
	ret = request_irq(IRQ_OJT_INT, optjoy_spi_irq_handler, 0, "optjoy_device", optjoy_data);
	if (!ret) {
		optjoy_data->use_irq = 1;
		gprintk("Start INTERRUPT mode!\n");
	}
	else {
		gprintk(KERN_ERR "[optjoy_spi_probe] unable to request_irq\n");
		optjoy_data->use_irq = 0;
	}       
#else
	optjoy_data->use_irq = 0;
#endif

	/* timer init & start (if not INTR mode...) */
	if (!optjoy_data->use_irq) {
		hrtimer_init(&optjoy_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		optjoy_data->timer.function = optjoy_spi_timer_func;
		hrtimer_start(&optjoy_data->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
	}

	hrtimer_init(&optjoy_data->timer_touchlock, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	optjoy_data->timer_touchlock.function = optjoy_spi_timer_func_touchlock;
	

#ifdef CONFIG_HAS_EARLYSUSPEND
	optjoy_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 2;
	optjoy_data->early_suspend.suspend = optjoy_spi_early_suspend;
	optjoy_data->early_suspend.resume = optjoy_spi_late_resume;
	register_early_suspend(&optjoy_data->early_suspend);
#endif	/* CONFIG_HAS_EARLYSUSPEND */

	return 0;

err_input_register_device_failed:
	input_free_device(optjoy_data->input_dev);

err_input_dev_alloc_failed:
	kfree(optjoy_data);

err_alloc_data_failed:
	destroy_workqueue(optjoy_workqueue);

err_create_workqueue_failed:
	return ret;
}
Example #3
static void pasemi_free_rx_chan(int chan)
{
	BUG_ON(test_bit(chan, rxch_free));
	set_bit(chan, rxch_free);
}
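A matching allocator would scan the same bitmap for a set (free) bit and claim it. A minimal sketch, assuming a hypothetical MAX_RXCH bound and helper name (the original allocator is not shown in this excerpt):

static int pasemi_alloc_rx_chan_sketch(void)
{
	int chan;

	/* MAX_RXCH is an assumed bound on the channel bitmap.
	 * Find a free channel bit and claim it; callers are assumed to
	 * serialize, since find + clear is not atomic as a pair. */
	chan = find_first_bit(rxch_free, MAX_RXCH);
	if (chan >= MAX_RXCH)
		return -ENOSPC;
	clear_bit(chan, rxch_free);
	return chan;
}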
/*-------------------------------------------------------------------------*/
void usbnet_pause_rx(struct usbnet *dev)
{
	set_bit(EVENT_RX_PAUSED, &dev->flags);

	netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
}
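The pairing resume path clears the same flag. A minimal sketch modeled on the upstream usbnet_resume_rx, shown without the deferred-SKB drain the real driver performs (the name is suffixed to mark it as illustrative):

void usbnet_resume_rx_sketch(struct usbnet *dev)
{
	/* sketch: the real usbnet_resume_rx also re-queues any SKBs
	 * that were deferred while EVENT_RX_PAUSED was set */
	clear_bit(EVENT_RX_PAUSED, &dev->flags);

	netif_dbg(dev, rx_status, dev->net, "paused rx queue disabled\n");
}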
Example #5
#ifdef _WIN32 /* assumed guard: the opening #if line is missing from this excerpt */
DLLIMPORT
#endif
int sidp_seq_negotiation_host(struct sidpconn *conn) {
	struct neg_data neg_data;

	/* Check if the connection is initiated */
	if (!test_bit(&conn->status_flags, SIDP_INITIATED_FL))
		return -1;

	/* Check if the connection is authenticated */
	if (!test_bit(&conn->status_flags, SIDP_AUTHENTICATED_FL))
		return -2;

	/* Receive support flags of the remote host */
	if (sidp_seq_negotiation_pkt_recv(conn, &neg_data) < 0)
		return -3;

	neg_data.flags = ntohl(neg_data.flags);

	/* Cross support flags of both end-points */
	neg_data.flags &= conn->support_flags;

	neg_data.flags = htonl(neg_data.flags);

	/* Send data to the remote host */
	if (sidp_seq_negotiation_pkt_send(conn, &neg_data) < 0)
		return -4;

	neg_data.flags = ntohl(neg_data.flags);

	/* Test compression negotiation */
	if (test_bit(&neg_data.flags, SIDP_SUPPORT_COMPRESS_LZO_FL)) {
		set_bit(&conn->negotiate_flags, SIDP_NEGOTIATE_COMPRESS_LZO_FL);
	} else if (test_bit(&neg_data.flags, SIDP_SUPPORT_COMPRESS_FASTLZ_FL)) {
		set_bit(&conn->negotiate_flags, SIDP_NEGOTIATE_COMPRESS_FASTLZ_FL);
	} else if (test_bit(&neg_data.flags, SIDP_SUPPORT_COMPRESS_ZLIB_FL)) {
		set_bit(&conn->negotiate_flags, SIDP_NEGOTIATE_COMPRESS_ZLIB_FL);
	} else {
		return -5;
	}

	/* Test encryption negotiation */
	if (test_bit(&neg_data.flags, SIDP_SUPPORT_CIPHER_XSALSA20_FL)) {
		set_bit(&conn->negotiate_flags, SIDP_NEGOTIATE_CIPHER_XSALSA20_FL);
	} else if (test_bit(&neg_data.flags, SIDP_SUPPORT_CIPHER_CHACHA_AVX_FL)) {
		set_bit(&conn->negotiate_flags, SIDP_NEGOTIATE_CIPHER_CHACHA_AVX_FL);
	} else if (test_bit(&neg_data.flags, SIDP_SUPPORT_CIPHER_CHACHA_AVX2_FL)) {
		set_bit(&conn->negotiate_flags, SIDP_NEGOTIATE_CIPHER_CHACHA_AVX2_FL);
	} else if (test_bit(&neg_data.flags, SIDP_SUPPORT_CIPHER_AES256_FL)) {
		set_bit(&conn->negotiate_flags, SIDP_NEGOTIATE_CIPHER_AES256_FL);
	} else {
		return -6;
	}

	/* Test encapsulation negotiation */
	if (test_bit(&neg_data.flags, SIDP_SUPPORT_ENCAP_DEFAULT_FL)) {
		set_bit(&conn->negotiate_flags, SIDP_NEGOTIATE_ENCAP_DEFAULT_FL);
	} else {
		return -7;
	}

	/* Set status to negotiated */
	set_bit(&conn->status_flags, SIDP_NEGOTIATED_FL);

	return 0;
}
Example #6
static int ohci_pci_resume (struct usb_hcd *hcd)
{
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	ohci_finish_controller_resume(hcd);
	return 0;
}
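For illustration, the inverse operation on the suspend path clears the same flag before the controller loses power. A hedged sketch (the real ohci_pci_suspend also quiesces interrupts and root-hub activity, and takes a pm_message_t argument on older kernels):

static int ohci_pci_suspend_sketch(struct usb_hcd *hcd)
{
	/* mark the hardware inaccessible so shared code stops touching it */
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	return 0;
}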
Example #7
static int __init star_hall_probe( struct platform_device *pdev )
{
	int err = 0;
	struct device *dev = &pdev->dev;
	struct star_hall_platform_data *pdata;
	atomic_set( &sensing_hall, 1 );
	printk("[HALLIC] probing hall ic driver\n");

	g_hall = kzalloc( sizeof(*g_hall), GFP_KERNEL );
	if(g_hall == NULL)
	{
		err = -ENOMEM;
		printk("[HALLIC] failed to allocate memory for HALL IC\n");
		goto error;
	}

	/*g_hall->h_hall_pmu = NvOdmServicesPmuOpen();
	if(!g_hall->h_hall_pmu)
	{
		err = -1;
		printk("[HALL:%s] Fail to Open the pmu!\n", __func__);
		goto error;
	}*/

/*
	if (star_hall_set_power_rail(12, NV_TRUE) != 0)
	{
		err = -1;
		goto error;
	}
*/
	pdata	=	pdev->dev.platform_data;
	if(pdata){
		g_hall->gpio = pdata->gpio;
		g_hall->irqflags = pdata->irqflags;
		g_hall->power = pdata->power;
	}

	if(g_hall->power)
	{
		if(g_hall->power("vddio_sys", 1) != 0)
		{
			err = -1;
			printk(KERN_ERR"Error to power control vddio_sys of HALL IC\n");
			goto error;
		}
	}
	power_enabled = true;
/*
	g_hall->h_hall_gpio = NvOdmGpioOpen();
	if(!g_hall->h_hall_gpio)
	{
		printk("[HALL:%s] Fail to open gpio\n", __func__);
		err = -1;
		goto error;
	}

	g_hall->hall_intr_port = 'u' - 'a';
	g_hall->hall_intr_pin  = 5;

	g_hall->h_hall_gpio_pin = NvOdmGpioAcquirePinHandle(g_hall->h_hall_gpio, g_hall->hall_intr_port, g_hall->hall_intr_pin );
	if( !g_hall->h_hall_gpio_pin )
	{
		printk("[HALL:%s] Fail to acquire pin handle!\n", __func__);
		err = -1;
		goto error;
	}

	NvOdmGpioConfig(g_hall->h_hall_gpio, g_hall->h_hall_gpio_pin, NvOdmGpioPinMode_InputData);
*/
	g_hall->is_int_mode = true;

	/* claim the Hall sensor GPIO and configure it as an input */
	tegra_gpio_enable(g_hall->gpio);
	err = gpio_request(g_hall->gpio, "star_hall");
	if (err)
		goto error;
	gpio_direction_input(g_hall->gpio);

	if( !g_hall->is_int_mode ){	
	//for polling mode
		INIT_DELAYED_WORK(&g_hall->delayed_work_hall, star_hall_work_func);
		schedule_delayed_work(&g_hall->delayed_work_hall, 100 );
	} else{
	//int mode
#if 0
        if (NvOdmGpioInterruptRegister(g_hall->h_hall_gpio, &(g_hall->h_hall_intr),
            g_hall->h_hall_gpio_pin, NvOdmGpioPinMode_InputInterruptAny, /*NvOdmGpioPinMode_InputInterruptFallingEdge,*/ star_hall_intr_handler, (void*)&g_hall, 0) == NV_FALSE){
			printk("[HALL:%s] Fail to register hall's interrupt\n", __func__ );	
			goto err_irq_request;
		} 
#endif
		err = request_irq(gpio_to_irq(g_hall->gpio), star_hall_intr_handler, g_hall->irqflags, pdev->name, dev);
		if(err)
		{
			printk(KERN_ERR"Error to request_irq failed of HALL IC\n");
			goto err_irq_request;
		}
	}

	g_hall->input_device = input_allocate_device();
	if(!g_hall->input_device){
		printk(KERN_ERR"Error to allocate the input device of HALL IC\n");
		err = -ENOMEM;
		goto err_irq_request;
	}

	set_bit(EV_KEY, g_hall->input_device->evbit);
	set_bit(KEY_POWER, g_hall->input_device->keybit);
	set_bit(EV_ABS, g_hall->input_device->evbit);
	input_set_abs_params(g_hall->input_device, ABS_HAT2X, 0, 1, 0, 0);
	g_hall->input_device->name = "hall_ic";
	err = input_register_device(g_hall->input_device);
	if(err){
		printk(KERN_ERR"failed to register the input device of HALL IC\n");
		input_free_device(g_hall->input_device);
		goto err_irq_request;
	}
		
	err = sysfs_create_group(&dev->kobj, &star_hall_group );
	if(err){
		printk(KERN_ERR"failed to create the sysfs group of HALL IC\n");
		goto err_irq_request;
	}
	DBGHALL("probe: ok\n");		

	return 0;

error:
	return err;
err_irq_request:
	gpio_free(g_hall->gpio);
	return err;
}
Example #8
static void recover_prep(void *arg)
{
	struct mddev *mddev = arg;
	struct md_cluster_info *cinfo = mddev->cluster_info;
	set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}
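The flag set here is cleared again when DLM recovery completes. A sketch of that counterpart, modeled on the upstream recover_done callback (treat the exact body as an assumption; the signature follows the dlm_lockspace_ops API):

static void recover_done(void *arg, struct dlm_slot *slots,
			 int num_slots, int our_slot, uint32_t generation)
{
	struct mddev *mddev = arg;
	struct md_cluster_info *cinfo = mddev->cluster_info;

	/* record our slot and re-enable read balancing */
	cinfo->slot_number = our_slot;
	complete(&cinfo->completion);
	clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}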
Example #9
static int gather_all_resync_info(struct mddev *mddev, int total_slots)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	int i, ret = 0;
	struct dlm_lock_resource *bm_lockres;
	struct suspend_info *s;
	char str[64];
	sector_t lo, hi;


	for (i = 0; i < total_slots; i++) {
		memset(str, '\0', 64);
		snprintf(str, 64, "bitmap%04d", i);
		bm_lockres = lockres_init(mddev, str, NULL, 1);
		if (!bm_lockres)
			return -ENOMEM;
		if (i == (cinfo->slot_number - 1)) {
			lockres_free(bm_lockres);
			continue;
		}

		bm_lockres->flags |= DLM_LKF_NOQUEUE;
		ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
		if (ret == -EAGAIN) {
			memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
			s = read_resync_info(mddev, bm_lockres);
			if (s) {
				pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
						__func__, __LINE__,
						(unsigned long long) s->lo,
						(unsigned long long) s->hi, i);
				spin_lock_irq(&cinfo->suspend_lock);
				s->slot = i;
				list_add(&s->list, &cinfo->suspend_list);
				spin_unlock_irq(&cinfo->suspend_lock);
			}
			ret = 0;
			lockres_free(bm_lockres);
			continue;
		}
		if (ret) {
			lockres_free(bm_lockres);
			goto out;
		}

		/* Read the disk bitmap sb and check if it needs recovery */
		ret = bitmap_copy_from_slot(mddev, i, &lo, &hi, false);
		if (ret) {
			pr_warn("md-cluster: Could not gather bitmaps from slot %d", i);
			lockres_free(bm_lockres);
			continue;
		}
		if ((hi > 0) && (lo < mddev->recovery_cp)) {
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			mddev->recovery_cp = lo;
			md_check_recovery(mddev);
		}

		dlm_unlock_sync(bm_lockres);
		lockres_free(bm_lockres);
	}
out:
	return ret;
}
Example #10
int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
{
	u32 data[MAX_MB_ARGUMENTS];
	struct cx18 *cx = s->cx;
	int captype = 0;
	struct cx18_stream *s_idx;

	if (!cx18_stream_enabled(s))
		return -EINVAL;

	CX18_DEBUG_INFO("Start encoder stream %s\n", s->name);

	switch (s->type) {
	case CX18_ENC_STREAM_TYPE_MPG:
		captype = CAPTURE_CHANNEL_TYPE_MPEG;
		cx->mpg_data_received = cx->vbi_data_inserted = 0;
		cx->dualwatch_jiffies = jiffies;
		cx->dualwatch_stereo_mode = v4l2_ctrl_g_ctrl(cx->cxhdl.audio_mode);
		cx->search_pack_header = 0;
		break;

	case CX18_ENC_STREAM_TYPE_IDX:
		captype = CAPTURE_CHANNEL_TYPE_INDEX;
		break;
	case CX18_ENC_STREAM_TYPE_TS:
		captype = CAPTURE_CHANNEL_TYPE_TS;
		break;
	case CX18_ENC_STREAM_TYPE_YUV:
		captype = CAPTURE_CHANNEL_TYPE_YUV;
		break;
	case CX18_ENC_STREAM_TYPE_PCM:
		captype = CAPTURE_CHANNEL_TYPE_PCM;
		break;
	case CX18_ENC_STREAM_TYPE_VBI:
#ifdef CX18_ENCODER_PARSES_SLICED
		captype = cx18_raw_vbi(cx) ?
		     CAPTURE_CHANNEL_TYPE_VBI : CAPTURE_CHANNEL_TYPE_SLICED_VBI;
#else
		/*
		 * Currently we set things up so that Sliced VBI from the
		 * digitizer is handled as Raw VBI by the encoder
		 */
		captype = CAPTURE_CHANNEL_TYPE_VBI;
#endif
		cx->vbi.frame = 0;
		cx->vbi.inserted_frame = 0;
		memset(cx->vbi.sliced_mpeg_size,
			0, sizeof(cx->vbi.sliced_mpeg_size));
		break;
	default:
		return -EINVAL;
	}

	/* Clear Streamoff flags in case left from last capture */
	clear_bit(CX18_F_S_STREAMOFF, &s->s_flags);

	cx18_vapi_result(cx, data, CX18_CREATE_TASK, 1, CPU_CMD_MASK_CAPTURE);
	s->handle = data[0];
	cx18_vapi(cx, CX18_CPU_SET_CHANNEL_TYPE, 2, s->handle, captype);

	/*
	 * For everything but CAPTURE_CHANNEL_TYPE_TS, play it safe and
	 * set up all the parameters, as it is not obvious which parameters the
	 * firmware shares across capture channel types and which it does not.
	 *
	 * Some of the cx18_vapi() calls below apply to only certain capture
	 * channel types.  We're hoping there's no harm in calling most of them
	 * anyway, as long as the values are all consistent.  Setting some
	 * shared parameters will have no effect once an analog capture channel
	 * has started streaming.
	 */
	if (captype != CAPTURE_CHANNEL_TYPE_TS) {
		cx18_vapi(cx, CX18_CPU_SET_VER_CROP_LINE, 2, s->handle, 0);
		cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 3, s->handle, 3, 1);
		cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 3, s->handle, 8, 0);
		cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 3, s->handle, 4, 1);

		/*
		 * Audio related reset according to
		 * Documentation/video4linux/cx2341x/fw-encoder-api.txt
		 */
		if (atomic_read(&cx->ana_capturing) == 0)
			cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 2,
				  s->handle, 12);

		/*
		 * Number of lines for Field 1 & Field 2 according to
		 * Documentation/video4linux/cx2341x/fw-encoder-api.txt
		 * Field 1 is 312 for 625 line systems in BT.656
		 * Field 2 is 313 for 625 line systems in BT.656
		 */
		cx18_vapi(cx, CX18_CPU_SET_CAPTURE_LINE_NO, 3,
			  s->handle, 312, 313);

		if (cx->v4l2_cap & V4L2_CAP_VBI_CAPTURE)
			cx18_vbi_setup(s);

		/*
		 * Select to receive I, P, and B frame index entries, if the
		 * index stream is enabled.  Otherwise disable index entry
		 * generation.
		 */
		s_idx = &cx->streams[CX18_ENC_STREAM_TYPE_IDX];
		cx18_vapi_result(cx, data, CX18_CPU_SET_INDEXTABLE, 2,
				 s->handle, cx18_stream_enabled(s_idx) ? 7 : 0);

		/* Call out to the common CX2341x API setup for user controls */
		cx->cxhdl.priv = s;
		cx2341x_handler_setup(&cx->cxhdl);

		/*
		 * When starting a capture and we're set for radio,
		 * ensure the video is muted, despite the user control.
		 */
		if (!cx->cxhdl.video_mute &&
		    test_bit(CX18_F_I_RADIO_USER, &cx->i_flags))
			cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2, s->handle,
			  (v4l2_ctrl_g_ctrl(cx->cxhdl.video_mute_yuv) << 8) | 1);

		/* Enable the Video Format Converter for UYVY 4:2:2 support,
		 * rather than the default HM12 Macroblock 4:2:0 support.
		 */
		if (captype == CAPTURE_CHANNEL_TYPE_YUV) {
			if (s->pixelformat == V4L2_PIX_FMT_UYVY)
				cx18_vapi(cx, CX18_CPU_SET_VFC_PARAM, 2,
					s->handle, 1);
			else
				/* If in doubt, default to HM12 */
				cx18_vapi(cx, CX18_CPU_SET_VFC_PARAM, 2,
					s->handle, 0);
		}
	}

	if (atomic_read(&cx->tot_capturing) == 0) {
		cx2341x_handler_set_busy(&cx->cxhdl, 1);
		clear_bit(CX18_F_I_EOS, &cx->i_flags);
		cx18_write_reg(cx, 7, CX18_DSP0_INTERRUPT_MASK);
	}

	cx18_vapi(cx, CX18_CPU_DE_SET_MDL_ACK, 3, s->handle,
		(void __iomem *)&cx->scb->cpu_mdl_ack[s->type][0] - cx->enc_mem,
		(void __iomem *)&cx->scb->cpu_mdl_ack[s->type][1] - cx->enc_mem);

	/* Init all the cpu_mdls for this stream */
	cx18_stream_configure_mdls(s);
	_cx18_stream_load_fw_queue(s);

	/* begin_capture */
	if (cx18_vapi(cx, CX18_CPU_CAPTURE_START, 1, s->handle)) {
		CX18_DEBUG_WARN("Error starting capture!\n");
		/* Ensure we're really not capturing before releasing MDLs */
		set_bit(CX18_F_S_STOPPING, &s->s_flags);
		if (s->type == CX18_ENC_STREAM_TYPE_MPG)
			cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, 1);
		else
			cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 1, s->handle);
		clear_bit(CX18_F_S_STREAMING, &s->s_flags);
		/* FIXME - CX18_F_S_STREAMOFF as well? */
		cx18_vapi(cx, CX18_CPU_DE_RELEASE_MDL, 1, s->handle);
		cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
		s->handle = CX18_INVALID_TASK_HANDLE;
		clear_bit(CX18_F_S_STOPPING, &s->s_flags);
		if (atomic_read(&cx->tot_capturing) == 0) {
			set_bit(CX18_F_I_EOS, &cx->i_flags);
			cx18_write_reg(cx, 5, CX18_DSP0_INTERRUPT_MASK);
		}
		return -EINVAL;
	}

	/* you're live! sit back and await interrupts :) */
	if (captype != CAPTURE_CHANNEL_TYPE_TS)
		atomic_inc(&cx->ana_capturing);
	atomic_inc(&cx->tot_capturing);
	return 0;
}
Example #11
/**
 * iscsi_sw_tcp_xmit_segment - transmit segment
 * @tcp_conn: the iSCSI TCP connection
 * @segment: the buffer to transmit
 *
 * This function transmits as much of the buffer as
 * the network layer will accept, and returns the number of
 * bytes transmitted.
 *
 * If CRC hashing is enabled, the function will compute the
 * hash as it goes. When the entire segment has been transmitted,
 * it will retrieve the hash value and send it as well.
 */
static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
				     struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sk = tcp_sw_conn->sock;
	unsigned int copied = 0;
	int r = 0;

	while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
		struct scatterlist *sg;
		unsigned int offset, copy;
		int flags = 0;

		r = 0;
		offset = segment->copied;
		copy = segment->size - offset;

		if (segment->total_copied + segment->size < segment->total_size)
			flags |= MSG_MORE;

		/* Use sendpage if we can; else fall back to sendmsg */
		if (!segment->data) {
			sg = segment->sg;
			offset += segment->sg_offset + sg->offset;
			r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
						  copy, flags);
		} else {
			struct msghdr msg = { .msg_flags = flags };
			struct kvec iov = {
				.iov_base = segment->data + offset,
				.iov_len = copy
			};

			r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
		}

		if (r < 0) {
			iscsi_tcp_segment_unmap(segment);
			return r;
		}
		copied += r;
	}
	return copied;
}

/**
 * iscsi_sw_tcp_xmit - TCP transmit
 **/
static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
	unsigned int consumed = 0;
	int rc = 0;

	while (1) {
		rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
		/*
		 * We may not have been able to send data because the conn
		 * is getting stopped. libiscsi will know so propagate err
		 * for it to do the right thing.
		 */
		if (rc == -EAGAIN)
			return rc;
		else if (rc < 0) {
			rc = ISCSI_ERR_XMIT_FAILED;
			goto error;
		} else if (rc == 0)
			break;

		consumed += rc;

		if (segment->total_copied >= segment->total_size) {
			if (segment->done != NULL) {
				rc = segment->done(tcp_conn, segment);
				if (rc != 0)
					goto error;
			}
		}
	}

	ISCSI_SW_TCP_DBG(conn, "xmit %d bytes\n", consumed);

	conn->txdata_octets += consumed;
	return consumed;

error:
	/* Transmit error. We could initiate error recovery
	 * here. */
	ISCSI_SW_TCP_DBG(conn, "Error sending PDU, errno=%d\n", rc);
	iscsi_conn_failure(conn, rc);
	return -EIO;
}

/**
 * iscsi_sw_tcp_xmit_qlen - return the number of bytes queued for xmit
 */
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;

	return segment->total_copied - segment->total_size;
}

static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	int rc;

	while (iscsi_sw_tcp_xmit_qlen(conn)) {
		rc = iscsi_sw_tcp_xmit(conn);
		if (rc == 0)
			return -EAGAIN;
		if (rc < 0)
			return rc;
	}

	return 0;
}

/*
 * This is called when we're done sending the header.
 * Simply copy the data_segment to the send segment, and return.
 */
static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
				      struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
	ISCSI_SW_TCP_DBG(tcp_conn->iscsi_conn,
			 "Header done. Next segment size %u total_size %u\n",
			 tcp_sw_conn->out.segment.size,
			 tcp_sw_conn->out.segment.total_size);
	return 0;
}

static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
				       size_t hdrlen)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	ISCSI_SW_TCP_DBG(conn, "%s\n", conn->hdrdgst_en ?
			 "digest enabled" : "digest disabled");

	/* Clear the data segment - needs to be filled in by the
	 * caller using iscsi_tcp_send_data_prep() */
	memset(&tcp_sw_conn->out.data_segment, 0,
	       sizeof(struct iscsi_segment));

	/* If header digest is enabled, compute the CRC and
	 * place the digest into the same buffer. We make
	 * sure that both iscsi_tcp_task and mtask have
	 * sufficient room.
	 */
	if (conn->hdrdgst_en) {
		iscsi_tcp_dgst_header(&tcp_sw_conn->tx_hash, hdr, hdrlen,
				      hdr + hdrlen);
		hdrlen += ISCSI_DIGEST_SIZE;
	}

	/* Remember header pointer for later, when we need
	 * to decide whether there's a payload to go along
	 * with the header. */
	tcp_sw_conn->out.hdr = hdr;

	iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen,
				  iscsi_sw_tcp_send_hdr_done, NULL);
}

/*
 * Prepare the send buffer for the payload data.
 * Padding and checksumming will all be taken care
 * of by the iscsi_segment routines.
 */
static int
iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
			    unsigned int count, unsigned int offset,
			    unsigned int len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct hash_desc *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
			 conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said he would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_hash = &tcp_sw_conn->tx_hash;

	return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
				     sg, count, offset, len,
				     NULL, tx_hash);
}

static void
iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
				   size_t len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct hash_desc *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said he would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_hash = &tcp_sw_conn->tx_hash;

	iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
				data, len, NULL, tx_hash);
}

static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
				 unsigned int offset, unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	int err = 0;

	iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);

	if (!count)
		return 0;

	if (!task->sc)
		iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
	else {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);

		err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
						  sdb->table.nents, offset,
						  count);
	}

	if (err) {
		/* got invalid offset/len */
		return -EIO;
	}
	return 0;
}

static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;

	task->hdr = task->dd_data + sizeof(*tcp_task);
	task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
	return 0;
}

static struct iscsi_cls_conn *
iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
			 uint32_t conn_idx)
{
	struct iscsi_conn *conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
					conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;

	tcp_sw_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
						     CRYPTO_ALG_ASYNC);
	tcp_sw_conn->tx_hash.flags = 0;
	if (IS_ERR(tcp_sw_conn->tx_hash.tfm))
		goto free_conn;

	tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
						     CRYPTO_ALG_ASYNC);
	tcp_sw_conn->rx_hash.flags = 0;
	if (IS_ERR(tcp_sw_conn->rx_hash.tfm))
		goto free_tx_tfm;
	tcp_conn->rx_hash = &tcp_sw_conn->rx_hash;

	return cls_conn;

free_tx_tfm:
	crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
free_conn:
	iscsi_conn_printk(KERN_ERR, conn,
			  "Could not create connection due to crc32c "
			  "loading error. Make sure the crc32c "
			  "module is built as a module or into the "
			  "kernel\n");
	iscsi_tcp_conn_teardown(cls_conn);
	return NULL;
}

static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	if (!sock)
		return;

	sock_hold(sock->sk);
	iscsi_sw_tcp_conn_restore_callbacks(tcp_sw_conn);
	sock_put(sock->sk);

	spin_lock_bh(&session->lock);
	tcp_sw_conn->sock = NULL;
	spin_unlock_bh(&session->lock);
	sockfd_put(sock);
}

static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	iscsi_sw_tcp_release_conn(conn);

	if (tcp_sw_conn->tx_hash.tfm)
		crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
	if (tcp_sw_conn->rx_hash.tfm)
		crypto_free_hash(tcp_sw_conn->rx_hash.tfm);

	iscsi_tcp_conn_teardown(cls_conn);
}

static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	/* userspace may have goofed up and not bound us */
	if (!sock)
		return;

	if (sock->sk->sk_sleep) {
		sock->sk->sk_err = EIO;
		wake_up_interruptible(sock->sk->sk_sleep);
	}

	/* stop xmit side */
	iscsi_suspend_tx(conn);

	/* stop recv side and release socket */
	iscsi_sw_tcp_release_conn(conn);

	iscsi_conn_stop(cls_conn, flag);
}

static int
iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
		       struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
		       int is_leading)
{
	struct iscsi_session *session = cls_session->dd_data;
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup((int)transport_eph, &err);
	if (!sock) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		goto free_socket;

	spin_lock_bh(&session->lock);
	/* bind iSCSI connection and socket */
	tcp_sw_conn->sock = sock;
	spin_unlock_bh(&session->lock);

	/* setup Socket parameters */
	sk = sock->sk;
	sk->sk_reuse = 1;
	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
	sk->sk_allocation = GFP_ATOMIC;

	iscsi_sw_tcp_conn_set_callbacks(conn);
	tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
	/*
	 * set receive state machine into initial state
	 */
	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;

free_socket:
	sockfd_put(sock);
	return err;
}

static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf,
				       int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	int value;

	switch(param) {
	case ISCSI_PARAM_HDRDGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		tcp_sw_conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
		break;
	case ISCSI_PARAM_MAX_R2T:
		sscanf(buf, "%d", &value);
		if (value <= 0 || !is_power_of_2(value))
			return -EINVAL;
		if (session->max_r2t == value)
			break;
		iscsi_tcp_r2tpool_free(session);
		iscsi_set_param(cls_conn, param, buf, buflen);
		if (iscsi_tcp_r2tpool_alloc(session))
			return -ENOMEM;
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return 0;
}

static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sockaddr_in6 addr;
	int rc, len;

	switch(param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		spin_lock_bh(&conn->session->lock);
		if (!tcp_sw_conn || !tcp_sw_conn->sock) {
			spin_unlock_bh(&conn->session->lock);
			return -ENOTCONN;
		}
		rc = kernel_getpeername(tcp_sw_conn->sock,
					(struct sockaddr *)&addr, &len);
		spin_unlock_bh(&conn->session->lock);
		if (rc)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr, param, buf);
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return 0;
}

static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
				       enum iscsi_host_param param, char *buf)
{
	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
	struct iscsi_session *session = tcp_sw_host->session;
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct sockaddr_in6 addr;
	int rc, len;

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
		spin_lock_bh(&session->lock);
		conn = session->leadconn;
		if (!conn) {
			spin_unlock_bh(&session->lock);
			return -ENOTCONN;
		}
		tcp_conn = conn->dd_data;

		tcp_sw_conn = tcp_conn->dd_data;
		if (!tcp_sw_conn->sock) {
			spin_unlock_bh(&session->lock);
			return -ENOTCONN;
		}

		rc = kernel_getsockname(tcp_sw_conn->sock,
					(struct sockaddr *)&addr, &len);
		spin_unlock_bh(&session->lock);
		if (rc)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr, param, buf);
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return 0;
}

static void
iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
			    struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	stats->custom_length = 3;
	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
	stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt;
	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
	stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt;
	strcpy(stats->custom[2].desc, "eh_abort_cnt");
	stats->custom[2].value = conn->eh_abort_cnt;

	iscsi_tcp_conn_get_stats(cls_conn, stats);
}

static struct iscsi_cls_session *
iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
			    uint16_t qdepth, uint32_t initial_cmdsn)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct iscsi_sw_tcp_host *tcp_sw_host;
	struct Scsi_Host *shost;

	if (ep) {
		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
		return NULL;
	}

	shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
				 sizeof(struct iscsi_sw_tcp_host), 1);
	if (!shost)
		return NULL;
	shost->transportt = iscsi_sw_tcp_scsi_transport;
	shost->cmd_per_lun = qdepth;
	shost->max_lun = iscsi_max_lun;
	shost->max_id = 0;
	shost->max_channel = 0;
	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	if (iscsi_host_add(shost, NULL))
		goto free_host;

	cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
					  cmds_max, 0,
					  sizeof(struct iscsi_tcp_task) +
					  sizeof(struct iscsi_sw_tcp_hdrbuf),
					  initial_cmdsn, 0);
	if (!cls_session)
		goto remove_host;
	session = cls_session->dd_data;
	tcp_sw_host = iscsi_host_priv(shost);
	tcp_sw_host->session = session;

	shost->can_queue = session->scsi_cmds_max;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
remove_host:
	iscsi_host_remove(shost);
free_host:
	iscsi_host_free(shost);
	return NULL;
}

static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);
	iscsi_session_teardown(cls_session);

	iscsi_host_remove(shost);
	iscsi_host_free(shost);
}

static mode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}

static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev)
{
	set_bit(QUEUE_FLAG_BIDI, &sdev->request_queue->queue_flags);
	return 0;
}
Example #12
int fimc_is_queue_buffer_queue(struct fimc_is_queue *queue,
	const struct fimc_is_vb2 *vb2,
	struct vb2_buffer *vb)
{
	int ret = 0;
	u32 i;
	u32 index;
	u32 ext_size;
	u32 spare;
	struct fimc_is_framemgr *framemgr;
	struct fimc_is_frame *frame;

	index = vb->v4l2_buf.index;
	framemgr = &queue->framemgr;

	BUG_ON(framemgr->id == FRAMEMGR_ID_INVALID);

	/* plane addresses are updated for checking every time */
	if (queue->framecfg.format.pixelformat == V4L2_PIX_FMT_YVU420M) {
		queue->buf_dva[index][0] = vb2->plane_addr(vb, 0);
		queue->buf_dva[index][1] = vb2->plane_addr(vb, 2);
		queue->buf_dva[index][2] = vb2->plane_addr(vb, 1);
		queue->buf_dva[index][3] = vb2->plane_addr(vb, 3);
	} else {
		for (i = 0; i < vb->num_planes; i++)
			queue->buf_dva[index][i] = vb2->plane_addr(vb, i);
	}

	frame = &framemgr->frame[index];

	/* uninitialized frame need to get info */
	if (frame->memory == FRAME_UNI_MEM)
		goto set_info;

	/* plane count check */
	if (frame->planes != vb->num_planes) {
		err("plane count is changed(%08X != %08X)",
			frame->planes, vb->num_planes);
		ret = -EINVAL;
		goto exit;
	}

	/* plane address check */
	for (i = 0; i < frame->planes; i++) {
		if (frame->dvaddr_buffer[i] != queue->buf_dva[index][i]) {
			err("buffer %d plane %d is changed(%08X != %08X)",
				index, i,
				frame->dvaddr_buffer[i],
				queue->buf_dva[index][i]);
			ret = -EINVAL;
			goto exit;
		}
	}

	goto exit;

set_info:
	if (test_bit(FIMC_IS_QUEUE_BUFFER_PREPARED, &queue->state)) {
		err("already prepared but new index(%d) is came", index);
		ret = -EINVAL;
		goto exit;
	}

	frame->vb = vb;
	frame->planes = vb->num_planes;
	spare = frame->planes - 1;

	for (i = 0; i < frame->planes; i++) {
		frame->dvaddr_buffer[i] = queue->buf_dva[index][i];
#ifdef PRINT_BUFADDR
		info("%04X %d.%d %08X\n", framemgr->id, index, i, frame->dvaddr_buffer[i]);
#endif
	}

	if (framemgr->id & FRAMEMGR_ID_SHOT) {
		ext_size = sizeof(struct camera2_shot_ext) - sizeof(struct camera2_shot);

		/* Create Kvaddr for Metadata */
		queue->buf_kva[index][spare] = vb2->plane_kvaddr(vb, spare);
		if (!queue->buf_kva[index][spare]) {
			err("plane_kvaddr is fail(%08X)", framemgr->id);
			ret = -EINVAL;
			goto exit;
		}

		frame->dvaddr_shot = queue->buf_dva[index][spare] + ext_size;
		frame->kvaddr_shot = queue->buf_kva[index][spare] + ext_size;
		frame->cookie_shot = (u32)vb2_plane_cookie(vb, spare);
		frame->shot = (struct camera2_shot *)frame->kvaddr_shot;
		frame->shot_ext = (struct camera2_shot_ext *)queue->buf_kva[index][spare];
		frame->shot_size = queue->framecfg.size[spare] - ext_size;
#ifdef MEASURE_TIME
		frame->tzone = (struct timeval *)frame->shot_ext->timeZone;
#endif
	} else {
		/* Create Kvaddr for frame sync */
		queue->buf_kva[index][spare] = vb2->plane_kvaddr(vb, spare);
		if (!queue->buf_kva[index][spare]) {
			err("plane_kvaddr is fail(%08X)", framemgr->id);
			ret = -EINVAL;
			goto exit;
		}

		frame->stream = (struct camera2_stream *)queue->buf_kva[index][spare];
		frame->stream->address = queue->buf_kva[index][spare];
		frame->stream_size = queue->framecfg.size[spare];
	}

	frame->memory = FRAME_INI_MEM;

	queue->buf_refcount++;

	if (queue->buf_rdycount == queue->buf_refcount)
		set_bit(FIMC_IS_QUEUE_BUFFER_READY, &queue->state);

	if (queue->buf_maxcount == queue->buf_refcount)
		set_bit(FIMC_IS_QUEUE_BUFFER_PREPARED, &queue->state);

exit:
	return ret;
}
Example #13
/*
 * journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void journal_commit_transaction(journal_t *journal)
{
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head *wbuf[64];
	int bufs;
	int flags;
	int err;
	unsigned long blocknr;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

#ifdef COMMIT_STATS
	spin_lock(&journal->j_list_lock);
	summarise_journal_usage(journal);
	spin_unlock(&journal->j_list_lock);
#endif

	/* Do we need to erase the effects of a prior journal_flush? */
	if (journal->j_flags & JFS_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	spin_lock(&commit_transaction->t_handle_lock);
	while (commit_transaction->t_updates) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (commit_transaction->t_updates) {
			spin_unlock(&commit_transaction->t_handle_lock);
			spin_unlock(&journal->j_state_lock);
			schedule();
			spin_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT (commit_transaction->t_outstanding_credits <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A journal_get_undo_access()+journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			if (jh->b_committed_data) {
				kfree(jh->b_committed_data);
				jh->b_committed_data = NULL;
			}
			jbd_unlock_bh_state(bh);
		}
		journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug (3, "JBD: commit phase 1\n");

	/*
	 * Switch to a new revoke table.
	 */
	journal_switch_revoke_table(journal);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	spin_unlock(&journal->j_state_lock);

	jbd_debug (3, "JBD: commit phase 2\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */

	err = 0;
	/*
	 * Whenever we unlock the journal and sleep, things can get added
	 * onto ->t_sync_datalist, so we have to keep looping back to
	 * write_out_data until we *know* that the list is empty.
	 */
	bufs = 0;
	/*
	 * Cleanup any flushed data buffers from the data list.  Even in
	 * abort mode, we want to flush this out as soon as possible.
	 */
write_out_data:
	cond_resched();
	spin_lock(&journal->j_list_lock);

	while (commit_transaction->t_sync_datalist) {
		struct buffer_head *bh;

		jh = commit_transaction->t_sync_datalist;
		commit_transaction->t_sync_datalist = jh->b_tnext;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			BUFFER_TRACE(bh, "locked");
			if (!inverted_lock(journal, bh))
				goto write_out_data;
			__journal_unfile_buffer(jh);
			__journal_file_buffer(jh, commit_transaction,
						BJ_Locked);
			jbd_unlock_bh_state(bh);
			if (lock_need_resched(&journal->j_list_lock)) {
				spin_unlock(&journal->j_list_lock);
				goto write_out_data;
			}
		} else {
			if (buffer_dirty(bh)) {
				BUFFER_TRACE(bh, "start journal writeout");
				get_bh(bh);
				wbuf[bufs++] = bh;
				if (bufs == ARRAY_SIZE(wbuf)) {
					jbd_debug(2, "submit %d writes\n",
							bufs);
					spin_unlock(&journal->j_list_lock);
					ll_rw_block(WRITE, bufs, wbuf);
					journal_brelse_array(wbuf, bufs);
					bufs = 0;
					goto write_out_data;
				}
			} else {
				BUFFER_TRACE(bh, "writeout complete: unfile");
				if (!inverted_lock(journal, bh))
					goto write_out_data;
				__journal_unfile_buffer(jh);
				jbd_unlock_bh_state(bh);
				journal_remove_journal_head(bh);
				put_bh(bh);
				if (lock_need_resched(&journal->j_list_lock)) {
					spin_unlock(&journal->j_list_lock);
					goto write_out_data;
				}
			}
		}
	}

	if (bufs) {
		spin_unlock(&journal->j_list_lock);
		ll_rw_block(WRITE, bufs, wbuf);
		journal_brelse_array(wbuf, bufs);
		spin_lock(&journal->j_list_lock);
	}

	/*
	 * Wait for all previously submitted IO to complete.
	 */
	while (commit_transaction->t_locked_list) {
		struct buffer_head *bh;

		jh = commit_transaction->t_locked_list->b_tprev;
		bh = jh2bh(jh);
		get_bh(bh);
		if (buffer_locked(bh)) {
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			if (unlikely(!buffer_uptodate(bh)))
				err = -EIO;
			spin_lock(&journal->j_list_lock);
		}
		if (!inverted_lock(journal, bh)) {
			put_bh(bh);
			spin_lock(&journal->j_list_lock);
			continue;
		}
		if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
			__journal_unfile_buffer(jh);
			jbd_unlock_bh_state(bh);
			journal_remove_journal_head(bh);
			put_bh(bh);
		} else {
			jbd_unlock_bh_state(bh);
		}
		put_bh(bh);
//		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);

	journal_write_revoke_records(journal, commit_transaction);

	jbd_debug(3, "JBD: commit phase 2\n");

	/*
	 * If we found any dirty or locked buffers, then we should have
	 * looped back up to the write_out_data label.  If there weren't
	 * any then journal_clean_data_list should have wiped the list
	 * clean by now, so check that it is in fact empty.
	 */
	J_ASSERT (commit_transaction->t_sync_datalist == NULL);

	jbd_debug (3, "JBD: commit phase 3\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	commit_transaction->t_state = T_COMMIT;

	descriptor = NULL;
	bufs = 0;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it for background writing. */

		if (is_journal_aborted(journal)) {
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				__journal_abort_hard(journal);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
                           completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			journal_file_buffer(descriptor, commit_transaction,
					BJ_LogCtl);
		}

		/* Where is the buffer to be written? */

		err = journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			__journal_abort_hard(journal);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by journal_next_log_block() also.
		 */
		commit_transaction->t_outstanding_credits--;

		/* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO*/

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		/*
		 * akpm: journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = journal_write_metadata_buffer(commit_transaction,
						      jh, &new_jh, blocknr);
		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
                   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JFS_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JFS_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += sizeof(journal_block_tag_t);
		space_left -= sizeof(journal_block_tag_t);

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == ARRAY_SIZE(wbuf) ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < sizeof(journal_block_tag_t) + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE, bh);
			}
			cond_resched();

			/* Force a new descriptor to be generated next
                           time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	/* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/

	jbd_debug(3, "JBD: commit phase 4\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to unlock and free the corresponding
                   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/* Wake up any transactions which were waiting for this
		   IO to complete */
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD: commit phase 5\n");

	/* Here we wait for the revoke record and descriptor record buffers */
 wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		journal_unfile_buffer(journal, jh);
		journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	jbd_debug(3, "JBD: commit phase 6\n");

	if (is_journal_aborted(journal))
		goto skip_commit;

	/* Done it all: now write the commit record.  We should have
	 * cleaned up our previous buffers by now, so if we are in abort
	 * mode we can now just skip the rest of the journal write
	 * entirely. */

	descriptor = journal_get_descriptor_buffer(journal);
	if (!descriptor) {
		__journal_abort_hard(journal);
		goto skip_commit;
	}

	/* AKPM: buglet - add `i' to tmp! */
	for (i = 0; i < jh2bh(descriptor)->b_size; i += 512) {
		journal_header_t *tmp =
			(journal_header_t*)jh2bh(descriptor)->b_data;
		tmp->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
		tmp->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
		tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	}

	JBUFFER_TRACE(descriptor, "write commit block");
	{
		struct buffer_head *bh = jh2bh(descriptor);
		int ret;
		int barrier_done = 0;

		set_buffer_dirty(bh);
		if (journal->j_flags & JFS_BARRIER) {
			set_buffer_ordered(bh);
			barrier_done = 1;
		}
		ret = sync_dirty_buffer(bh);
		/* is it possible for another commit to fail at roughly
		 * the same time as this one?  If so, we don't want to
		 * trust the barrier flag in the super, but instead want
		 * to remember if we sent a barrier request
		 */
		if (ret == -EOPNOTSUPP && barrier_done) {
			char b[BDEVNAME_SIZE];

			printk(KERN_WARNING
				"JBD: barrier-based sync failed on %s - "
				"disabling barriers\n",
				bdevname(journal->j_dev, b));
			spin_lock(&journal->j_state_lock);
			journal->j_flags &= ~JFS_BARRIER;
			spin_unlock(&journal->j_state_lock);

			/* And try again, without the barrier */
			clear_buffer_ordered(bh);
			set_buffer_uptodate(bh);
			set_buffer_dirty(bh);
			ret = sync_dirty_buffer(bh);
		}
		if (unlikely(ret == -EIO))
			err = -EIO;
		put_bh(bh);		/* One for getblk() */
		journal_put_journal_head(descriptor);
	}

	/* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

skip_commit: /* The journal should be unlocked by now. */

	if (err)
		__journal_abort_hard(journal);

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_sync_datalist == NULL);
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;

		jh = commit_transaction->t_forget;
		bh = jh2bh(jh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction ||
			jh->b_transaction == journal->j_running_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 */
		if (jh->b_committed_data) {
			kfree(jh->b_committed_data);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
			}
		} else if (jh->b_frozen_data) {
			kfree(jh->b_frozen_data);
			jh->b_frozen_data = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			__journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future now that the last use has
		 * been committed.  That's not only a performance gain,
		 * it also stops aliasing problems if the buffer is left
		 * behind for writeback and gets reallocated for another
		 * use in a different page. */
		if (buffer_freed(bh)) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__journal_insert_checkpoint(jh, commit_transaction);
			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
			__journal_refile_buffer(jh);
			jbd_unlock_bh_state(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
			__journal_unfile_buffer(jh);
			jbd_unlock_bh_state(bh);
			journal_remove_journal_head(bh);  /* needs a brelse */
			release_buffer_page(bh);
		}
		spin_unlock(&journal->j_list_lock);
		if (cond_resched())
			goto restart_loop;
	}

	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 8\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT);

	/*
	 * This is a bit sleazy.  We borrow j_list_lock to protect
	 * journal->j_committing_transaction in __journal_remove_checkpoint.
	 * Really, __journal_remove_checkpoint should be using j_state_lock but
	 * it's a bit of a hassle to hold that across __journal_remove_checkpoint
	 */
	spin_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	spin_unlock(&journal->j_state_lock);

	if (commit_transaction->t_checkpoint_list == NULL) {
		__journal_drop_transaction(journal, commit_transaction);
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	wake_up(&journal->j_wait_done_commit);
}
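The block above splices the finished transaction into journal->j_checkpoint_transactions, a circular doubly-linked list threaded through t_cpnext/t_cpprev. Below is a minimal userspace sketch of that insertion pattern; the struct and field names are hypothetical, not JBD's.

#include <stdio.h>

struct node {
	int tid;
	struct node *next, *prev;
};

/* Insert n at the tail of the ring headed by *head, mirroring the
 * t_cpnext/t_cpprev handling in the commit path above. */
static void ring_insert(struct node **head, struct node *n)
{
	if (*head == NULL) {
		*head = n;
		n->next = n;
		n->prev = n;
	} else {
		n->next = *head;
		n->prev = (*head)->prev;
		n->next->prev = n;
		n->prev->next = n;
	}
}

int main(void)
{
	static struct node nodes[3];
	struct node *head = NULL;

	for (int tid = 0; tid < 3; tid++) {
		nodes[tid].tid = tid + 1;
		ring_insert(&head, &nodes[tid]);
	}
	struct node *p = head;
	do {
		printf("tid %d\n", p->tid);
		p = p->next;
	} while (p != head);
	return 0;
}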
Example #14
static int tegra_ehci_hub_control(
	struct usb_hcd	*hcd,
	u16		typeReq,
	u16		wValue,
	u16		wIndex,
	char		*buf,
	u16		wLength
)
{
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	struct tegra_ehci_hcd *tegra = (struct tegra_ehci_hcd *)ehci->priv;
	u32 __iomem	*status_reg;
	u32		temp;
	unsigned long	flags;
	int		retval = 0;

	status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];

	spin_lock_irqsave(&ehci->lock, flags);

	if (typeReq == GetPortStatus) {
		temp = ehci_readl(ehci, status_reg);
		if (tegra->port_resuming && !(temp & PORT_SUSPEND)) {
			/* Resume completed, re-enable disconnect detection */
			tegra->port_resuming = 0;
			tegra_usb_phy_postresume(hcd->usb_phy);
		}
	}

	else if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
		temp = ehci_readl(ehci, status_reg);
		if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
			retval = -EPIPE;
			goto done;
		}

		temp &= ~(PORT_RWC_BITS | PORT_WKCONN_E);
		temp |= PORT_WKDISC_E | PORT_WKOC_E;
		ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);

		/*
		 * If a transaction is in progress, there may be a delay in
		 * suspending the port. Poll until the port is suspended.
		 */
		if (ehci_handshake(ehci, status_reg, PORT_SUSPEND,
						PORT_SUSPEND, 5000))
			pr_err("%s: timeout waiting for SUSPEND\n", __func__);

		set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
		goto done;
	}

	/* For USB1 port we need to issue Port Reset twice internally */
	if (tegra->needs_double_reset &&
	   (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_RESET)) {
		spin_unlock_irqrestore(&ehci->lock, flags);
		return tegra_ehci_internal_port_reset(ehci, status_reg);
	}

	/*
	 * Tegra host controller will time the resume operation to clear the bit
	 * when the port control state switches to HS or FS Idle. This behavior
	 * is different from EHCI where the host controller driver is required
	 * to set this bit to a zero after the resume duration is timed in the
	 * driver.
	 */
	else if (typeReq == ClearPortFeature &&
					wValue == USB_PORT_FEAT_SUSPEND) {
		temp = ehci_readl(ehci, status_reg);
		if ((temp & PORT_RESET) || !(temp & PORT_PE)) {
			retval = -EPIPE;
			goto done;
		}

		if (!(temp & PORT_SUSPEND))
			goto done;

		/* Disable disconnect detection during port resume */
		tegra_usb_phy_preresume(hcd->usb_phy);

		ehci->reset_done[wIndex-1] = jiffies + msecs_to_jiffies(25);

		temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
		/* start resume signalling */
		ehci_writel(ehci, temp | PORT_RESUME, status_reg);
		set_bit(wIndex-1, &ehci->resuming_ports);

		spin_unlock_irqrestore(&ehci->lock, flags);
		msleep(20);
		spin_lock_irqsave(&ehci->lock, flags);

		/* Poll until the controller clears RESUME and SUSPEND */
		if (ehci_handshake(ehci, status_reg, PORT_RESUME, 0, 2000))
			pr_err("%s: timeout waiting for RESUME\n", __func__);
		if (ehci_handshake(ehci, status_reg, PORT_SUSPEND, 0, 2000))
			pr_err("%s: timeout waiting for SUSPEND\n", __func__);

		ehci->reset_done[wIndex-1] = 0;
		clear_bit(wIndex-1, &ehci->resuming_ports);

		tegra->port_resuming = 1;
		goto done;
	}

	spin_unlock_irqrestore(&ehci->lock, flags);

	/* Handle the hub control events here */
	return orig_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);

done:
	spin_unlock_irqrestore(&ehci->lock, flags);
	return retval;
}
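tegra_ehci_hub_control() relies on ehci_handshake() to poll a port register until selected bits reach a wanted value or a timeout expires. A hedged userspace sketch of that poll-until-state-or-timeout idiom follows; reg_read(), the bit values, and the simulated status word are stand-ins, not the EHCI API.

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static volatile uint32_t fake_port_status = 0x4;	/* pretend SUSPEND is set */

static uint32_t reg_read(void)
{
	return fake_port_status;
}

/* Wait until (status & mask) == done, or usec microseconds elapse.
 * Returns 0 on success and -1 on timeout, like the kernel helper. */
static int handshake(uint32_t mask, uint32_t done, unsigned int usec)
{
	while (usec--) {
		if ((reg_read() & mask) == done)
			return 0;
		struct timespec ts = { 0, 1000 };	/* sleep 1 us */
		nanosleep(&ts, NULL);
	}
	return -1;
}

int main(void)
{
	if (handshake(0x4, 0x4, 100))
		fprintf(stderr, "timeout waiting for SUSPEND\n");
	else
		printf("port suspended\n");
	return 0;
}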
Example #15
/*
 * remove a file from an AFS filesystem
 */
static int afs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct afs_vnode *dvnode, *vnode;
	struct key *key;
	int ret;

	dvnode = AFS_FS_I(dir);

	_enter("{%x:%u},{%s}",
	       dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name);

	ret = -ENAMETOOLONG;
	if (dentry->d_name.len >= AFSNAMEMAX)
		goto error;

	key = afs_request_key(dvnode->volume->cell);
	if (IS_ERR(key)) {
		ret = PTR_ERR(key);
		goto error;
	}

	if (dentry->d_inode) {
		vnode = AFS_FS_I(dentry->d_inode);

		/* make sure we have a callback promise on the victim */
		ret = afs_validate(vnode, key);
		if (ret < 0)
			goto error;
	}

	ret = afs_vnode_remove(dvnode, key, dentry->d_name.name, false);
	if (ret < 0)
		goto remove_error;

	if (dentry->d_inode) {
		/* if the file wasn't deleted due to excess hard links, the
		 * fileserver will break the callback promise on the file - if
		 * it had one - before it returns to us, and if it was deleted,
		 * it won't
		 *
		 * however, if we didn't have a callback promise outstanding,
		 * or it was outstanding on a different server, then it won't
		 * break it either...
		 */
		vnode = AFS_FS_I(dentry->d_inode);
		if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
			_debug("AFS_VNODE_DELETED");
		if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags))
			_debug("AFS_VNODE_CB_BROKEN");
		set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
		ret = afs_validate(vnode, key);
		_debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, ret);
	}

	key_put(key);
	_leave(" = 0");
	return 0;

remove_error:
	key_put(key);
error:
	_leave(" = %d", ret);
	return ret;
}
Example #16
static int join(struct mddev *mddev, int nodes)
{
	struct md_cluster_info *cinfo;
	int ret, ops_rv;
	char str[64];

	cinfo = kzalloc(sizeof(struct md_cluster_info), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	INIT_LIST_HEAD(&cinfo->suspend_list);
	spin_lock_init(&cinfo->suspend_lock);
	init_completion(&cinfo->completion);
	set_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
	init_waitqueue_head(&cinfo->wait);
	mutex_init(&cinfo->recv_mutex);

	mddev->cluster_info = cinfo;

	memset(str, 0, 64);
	sprintf(str, "%pU", mddev->uuid);
	ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
				DLM_LSFL_FS, LVB_SIZE,
				&md_ls_ops, mddev, &ops_rv, &cinfo->lockspace);
	if (ret)
		goto err;
	wait_for_completion(&cinfo->completion);
	if (nodes < cinfo->slot_number) {
		pr_err("md-cluster: Slot allotted(%d) is greater than available slots(%d).",
			cinfo->slot_number, nodes);
		ret = -ERANGE;
		goto err;
	}
	/* Initiate the communication resources */
	ret = -ENOMEM;
	cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv");
	if (!cinfo->recv_thread) {
		pr_err("md-cluster: cannot allocate memory for recv_thread!\n");
		goto err;
	}
	cinfo->message_lockres = lockres_init(mddev, "message", NULL, 1);
	if (!cinfo->message_lockres)
		goto err;
	cinfo->token_lockres = lockres_init(mddev, "token", NULL, 0);
	if (!cinfo->token_lockres)
		goto err;
	cinfo->no_new_dev_lockres = lockres_init(mddev, "no-new-dev", NULL, 0);
	if (!cinfo->no_new_dev_lockres)
		goto err;

	ret = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
	if (ret) {
		ret = -EAGAIN;
		pr_err("md-cluster: can't join cluster to avoid lock issue\n");
		goto err;
	}
	cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
	if (!cinfo->ack_lockres) {
		ret = -ENOMEM;
		goto err;
	}
	/* get sync CR lock on ACK; report the actual error code. */
	ops_rv = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR);
	if (ops_rv)
		pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n",
				ops_rv);
	dlm_unlock_sync(cinfo->token_lockres);
	/* get sync CR lock on no-new-dev. */
	ops_rv = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
	if (ops_rv)
		pr_err("md-cluster: failed to get a sync CR lock on no-new-dev!(%d)\n", ops_rv);

	pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
	snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
	cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
	if (!cinfo->bitmap_lockres) {
		ret = -ENOMEM;
		goto err;
	}
	if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
		pr_err("Failed to get bitmap lock\n");
		ret = -EINVAL;
		goto err;
	}

	cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
	if (!cinfo->resync_lockres) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	md_unregister_thread(&cinfo->recovery_thread);
	md_unregister_thread(&cinfo->recv_thread);
	lockres_free(cinfo->message_lockres);
	lockres_free(cinfo->token_lockres);
	lockres_free(cinfo->ack_lockres);
	lockres_free(cinfo->no_new_dev_lockres);
	lockres_free(cinfo->resync_lockres);
	lockres_free(cinfo->bitmap_lockres);
	if (cinfo->lockspace)
		dlm_release_lockspace(cinfo->lockspace, 2);
	mddev->cluster_info = NULL;
	kfree(cinfo);
	return ret;
}
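join() funnels every failure to one err: label that releases everything unconditionally; that only works because the context is zeroed up front (kzalloc) and each free routine tolerates a NULL argument. A small sketch of the idiom, with illustrative names:

#include <stdlib.h>
#include <string.h>

struct ctx {
	char *a, *b, *c;
};

static int ctx_setup(struct ctx *ctx)
{
	int ret = -1;

	memset(ctx, 0, sizeof(*ctx));	/* all pointers start NULL */
	ctx->a = malloc(16);
	if (!ctx->a)
		goto err;
	ctx->b = malloc(32);
	if (!ctx->b)
		goto err;
	ctx->c = malloc(64);
	if (!ctx->c)
		goto err;
	return 0;
err:
	free(ctx->c);	/* free(NULL) is a no-op, like lockres_free(NULL) */
	free(ctx->b);
	free(ctx->a);
	return ret;
}

int main(void)
{
	struct ctx ctx;

	if (ctx_setup(&ctx))
		return 1;
	free(ctx.c);
	free(ctx.b);
	free(ctx.a);
	return 0;
}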
Example #17
static int acm_tty_open(struct tty_struct *tty, struct file *filp)
{
    struct acm *acm;
    int rv = -ENODEV;
    int i;
    dbg("Entering acm_tty_open.");

    mutex_lock(&open_mutex);

    acm = acm_table[tty->index];
    if (!acm || !acm->dev)
        goto err_out;
    else
        rv = 0;

    set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);

    tty->driver_data = acm;
    tty_port_tty_set(&acm->port, tty);

    if (usb_autopm_get_interface(acm->control) < 0)
        goto early_bail;
    else
        acm->control->needs_remote_wakeup = 1;

    mutex_lock(&acm->mutex);
    if (acm->port.count++) {
        usb_autopm_put_interface(acm->control);
        goto done;
    }

    acm->ctrlurb->dev = acm->dev;
    if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
        dbg("usb_submit_urb(ctrl irq) failed");
        goto bail_out;
    }

    if (0 > acm_set_control(acm, acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS) &&
            (acm->ctrl_caps & USB_CDC_CAP_LINE))
        goto full_bailout;

    usb_autopm_put_interface(acm->control);

    INIT_LIST_HEAD(&acm->spare_read_urbs);
    INIT_LIST_HEAD(&acm->spare_read_bufs);
    INIT_LIST_HEAD(&acm->filled_read_bufs);

    for (i = 0; i < acm->rx_buflimit; i++)
        list_add(&(acm->ru[i].list), &acm->spare_read_urbs);
    for (i = 0; i < acm->rx_buflimit; i++)
        list_add(&(acm->rb[i].list), &acm->spare_read_bufs);

    acm->throttle = 0;

    tasklet_schedule(&acm->urb_task);
    rv = tty_port_block_til_ready(&acm->port, tty, filp);
done:
    mutex_unlock(&acm->mutex);
err_out:
    mutex_unlock(&open_mutex);
    return rv;

full_bailout:
    usb_kill_urb(acm->ctrlurb);
bail_out:
    usb_autopm_put_interface(acm->control);
    acm->port.count--;
    mutex_unlock(&acm->mutex);
early_bail:
    mutex_unlock(&open_mutex);
    tty_port_tty_set(&acm->port, NULL);
    return -EIO;
}
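The acm->port.count++ test above is the classic "initialize only on the first open" idiom: the post-increment reads zero only for the first opener, so later opens skip the hardware bring-up. A tiny sketch with invented names:

#include <stdio.h>

struct port {
	int count;
};

static void hw_start(void)
{
	printf("hardware started\n");
}

static int port_open(struct port *p)
{
	if (p->count++)		/* not the first opener: nothing to do */
		return 0;
	hw_start();		/* first open: bring the device up */
	return 0;
}

int main(void)
{
	struct port p = { 0 };

	port_open(&p);	/* starts the hardware */
	port_open(&p);	/* no-op */
	return 0;
}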
Example #18
/*
 * driver allocation handlers.
 */
int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
{
	int retval = -ENOMEM;

	mutex_init(&rt2x00dev->csr_mutex);

	set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);

	/*
	 * Make room for rt2x00_intf inside the per-interface
	 * structure ieee80211_vif.
	 */
	rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf);

	/*
	 * Determine which operating modes are supported, all modes
	 * which require beaconing, depend on the availability of
	 * beacon entries.
	 */
	rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
	if (rt2x00dev->ops->bcn->entry_num > 0)
		rt2x00dev->hw->wiphy->interface_modes |=
		    BIT(NL80211_IFTYPE_ADHOC) |
		    BIT(NL80211_IFTYPE_AP) |
		    BIT(NL80211_IFTYPE_MESH_POINT) |
		    BIT(NL80211_IFTYPE_WDS);

	/*
	 * Initialize configuration work.
	 */
	INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);

	/*
	 * Let the driver probe the device to detect the capabilities.
	 */
	retval = rt2x00dev->ops->lib->probe_hw(rt2x00dev);
	if (retval) {
		ERROR(rt2x00dev, "Failed to allocate device.\n");
		goto exit;
	}

	/*
	 * Allocate queue array.
	 */
	retval = rt2x00queue_allocate(rt2x00dev);
	if (retval)
		goto exit;

	/*
	 * Initialize ieee80211 structure.
	 */
	retval = rt2x00lib_probe_hw(rt2x00dev);
	if (retval) {
		ERROR(rt2x00dev, "Failed to initialize hw.\n");
		goto exit;
	}

	/*
	 * Register extra components.
	 */
	rt2x00link_register(rt2x00dev);
	rt2x00leds_register(rt2x00dev);
	rt2x00debug_register(rt2x00dev);

	return 0;

exit:
	rt2x00lib_remove_dev(rt2x00dev);

	return retval;
}
Example #19
int btbcm_setup_patchram(struct hci_dev *hdev)
{
	char fw_name[64];
	const struct firmware *fw;
	u16 subver, rev, pid, vid;
	const char *hw_name = NULL;
	struct sk_buff *skb;
	struct hci_rp_read_local_version *ver;
	int i, err;

	/* Reset */
	err = btbcm_reset(hdev);
	if (err)
		return err;

	/* Read Local Version Info */
	skb = btbcm_read_local_version(hdev);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ver = (struct hci_rp_read_local_version *)skb->data;
	rev = le16_to_cpu(ver->hci_rev);
	subver = le16_to_cpu(ver->lmp_subver);
	kfree_skb(skb);

	/* Read Verbose Config Version Info */
	skb = btbcm_read_verbose_config(hdev);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
	kfree_skb(skb);

	/* Read Local Name */
	skb = btbcm_read_local_name(hdev);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
	kfree_skb(skb);

	switch ((rev & 0xf000) >> 12) {
	case 0:
	case 3:
		for (i = 0; bcm_uart_subver_table[i].name; i++) {
			if (subver == bcm_uart_subver_table[i].subver) {
				hw_name = bcm_uart_subver_table[i].name;
				break;
			}
		}

		snprintf(fw_name, sizeof(fw_name), "brcm/%s.hcd",
			 hw_name ? : "BCM");
		break;
	case 1:
	case 2:
		/* Read USB Product Info */
		skb = btbcm_read_usb_product(hdev);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		vid = get_unaligned_le16(skb->data + 1);
		pid = get_unaligned_le16(skb->data + 3);
		kfree_skb(skb);

		for (i = 0; bcm_usb_subver_table[i].name; i++) {
			if (subver == bcm_usb_subver_table[i].subver) {
				hw_name = bcm_usb_subver_table[i].name;
				break;
			}
		}

		snprintf(fw_name, sizeof(fw_name), "brcm/%s-%4.4x-%4.4x.hcd",
			 hw_name ? : "BCM", vid, pid);
		break;
	default:
		return 0;
	}

	BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
		hw_name ? : "BCM", (subver & 0xe000) >> 13,
		(subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);

	err = request_firmware(&fw, fw_name, &hdev->dev);
	if (err < 0) {
		BT_INFO("%s: BCM: Patch %s not found", hdev->name, fw_name);
		return 0;
	}

	btbcm_patchram(hdev, fw);

	release_firmware(fw);

	/* Reset */
	err = btbcm_reset(hdev);
	if (err)
		return err;

	/* Read Local Version Info */
	skb = btbcm_read_local_version(hdev);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ver = (struct hci_rp_read_local_version *)skb->data;
	rev = le16_to_cpu(ver->hci_rev);
	subver = le16_to_cpu(ver->lmp_subver);
	kfree_skb(skb);

	BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
		hw_name ? : "BCM", (subver & 0xe000) >> 13,
		(subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);

	/* Read Local Name */
	skb = btbcm_read_local_name(hdev);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
	kfree_skb(skb);

	btbcm_check_bdaddr(hdev);

	set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);

	return 0;
}
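btbcm_setup_patchram() picks a firmware name by scanning a sentinel-terminated subver table and falling back to "BCM" when nothing matches (the hw_name ? : "BCM" expressions). A sketch of that lookup shape; the table contents here are made up, not real Broadcom IDs.

#include <stdio.h>
#include <stdint.h>

struct subver_entry {
	uint16_t subver;
	const char *name;
};

static const struct subver_entry subver_table[] = {
	{ 0x1111, "BCM_CHIP_A" },	/* illustrative entries only */
	{ 0x2222, "BCM_CHIP_B" },
	{ 0, NULL }			/* sentinel terminates the scan */
};

static const char *lookup_name(uint16_t subver)
{
	for (int i = 0; subver_table[i].name; i++)
		if (subver_table[i].subver == subver)
			return subver_table[i].name;
	return "BCM";			/* mirrors hw_name ? : "BCM" */
}

int main(void)
{
	printf("brcm/%s.hcd\n", lookup_name(0x2222));
	printf("brcm/%s.hcd\n", lookup_name(0x1234));	/* falls back */
	return 0;
}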
Example #20
/*
 * driver allocation handlers.
 */
int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
{
	int retval = -ENOMEM;

	/*
	 * Set possible interface combinations.
	 */
	rt2x00lib_set_if_combinations(rt2x00dev);

	/*
	 * Allocate the driver data memory, if necessary.
	 */
	if (rt2x00dev->ops->drv_data_size > 0) {
		rt2x00dev->drv_data = kzalloc(rt2x00dev->ops->drv_data_size,
			                      GFP_KERNEL);
		if (!rt2x00dev->drv_data) {
			retval = -ENOMEM;
			goto exit;
		}
	}

	spin_lock_init(&rt2x00dev->irqmask_lock);
	mutex_init(&rt2x00dev->csr_mutex);

	set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);

	/*
	 * Make room for rt2x00_intf inside the per-interface
	 * structure ieee80211_vif.
	 */
	rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf);

	/*
	 * Determine which operating modes are supported, all modes
	 * which require beaconing, depend on the availability of
	 * beacon entries.
	 */
	rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
	if (rt2x00dev->ops->bcn->entry_num > 0)
		rt2x00dev->hw->wiphy->interface_modes |=
		    BIT(NL80211_IFTYPE_ADHOC) |
		    BIT(NL80211_IFTYPE_AP) |
		    BIT(NL80211_IFTYPE_MESH_POINT) |
		    BIT(NL80211_IFTYPE_WDS);

	rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	/*
	 * Initialize work.
	 */
	rt2x00dev->workqueue =
	    alloc_ordered_workqueue(wiphy_name(rt2x00dev->hw->wiphy), 0);
	if (!rt2x00dev->workqueue) {
		retval = -ENOMEM;
		goto exit;
	}

	INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
	INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup);
	INIT_WORK(&rt2x00dev->sleep_work, rt2x00lib_sleep);

	/*
	 * Let the driver probe the device to detect the capabilities.
	 */
	retval = rt2x00dev->ops->lib->probe_hw(rt2x00dev);
	if (retval) {
		ERROR(rt2x00dev, "Failed to allocate device.\n");
		goto exit;
	}

	/*
	 * Allocate queue array.
	 */
	retval = rt2x00queue_allocate(rt2x00dev);
	if (retval)
		goto exit;

	/*
	 * Initialize ieee80211 structure.
	 */
	retval = rt2x00lib_probe_hw(rt2x00dev);
	if (retval) {
		ERROR(rt2x00dev, "Failed to initialize hw.\n");
		goto exit;
	}

	/*
	 * Register extra components.
	 */
	rt2x00link_register(rt2x00dev);
	rt2x00leds_register(rt2x00dev);
	rt2x00debug_register(rt2x00dev);
	rt2x00rfkill_register(rt2x00dev);

	return 0;

exit:
	rt2x00lib_remove_dev(rt2x00dev);

	return retval;
}
Example #21
static int __devinit cypress_touchkey_probe(struct i2c_client *client,
				  const struct i2c_device_id *id)
{
	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
	struct cypress_touchkey_platform_data *pdata =
					client->dev.platform_data;
	struct cypress_touchkey_info *info;
	struct input_dev *input_dev;
	int ret = 0;
	int i;
	int retry = NUM_OF_RETRY_UPDATE;	/* used by the FW update path below */
	int ic_fw_ver;
	int error;

	struct device *sec_touchkey;

printk("[TKEY] %s _ %d\n",__func__,__LINE__);

	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
		return -EIO;
#ifdef CONFIG_OF
	if (client->dev.of_node) {
		pdata = devm_kzalloc(&client->dev,
			sizeof(struct cypress_touchkey_platform_data),
				GFP_KERNEL);
		if (!pdata) {
			dev_info(&client->dev, "Failed to allocate memory\n");
			return -ENOMEM;
		}

		error = cypress_parse_dt(&client->dev, pdata);
		if (error)
			return error;
	} else
		pdata = client->dev.platform_data;

	cypress_request_gpio(pdata);
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		dev_err(&client->dev, "failed to allocate memory.\n");
		ret = -ENOMEM;
		goto err_mem_alloc;
	}

	input_dev = input_allocate_device();
	if (!input_dev) {
		dev_err(&client->dev, "fail to allocate input device.\n");
		goto err_input_dev_alloc;
	}

	client->irq = gpio_to_irq(pdata->gpio_int);

	info->client = client;
	info->input_dev = input_dev;
	info->pdata = pdata;
	info->irq = client->irq;
	info->touchkey_update_status = 0;
	
	input_dev->name = "sec_touchkey";
	input_dev->phys = info->phys;
	input_dev->id.bustype = BUS_I2C;
	input_dev->dev.parent = &client->dev;

	info->is_powering_on = true;
	cypress_power_onoff(info, 1);
	msleep(50);
	set_bit(EV_SYN, input_dev->evbit);
	set_bit(EV_KEY, input_dev->evbit);
	set_bit(EV_LED, input_dev->evbit);
	set_bit(LED_MISC, input_dev->ledbit);

	for (i = 0; i < pdata->keycodes_size; i++) {
		info->keycode[i] = pdata->touchkey_keycode[i];
		set_bit(info->keycode[i], input_dev->keybit);
	}

	input_set_drvdata(input_dev, info);
	mutex_init(&info->touchkey_mutex);
	ret = input_register_device(input_dev);
	if (ret) {
		dev_err(&client->dev, "[TOUCHKEY] failed to register input dev (%d).\n",
			ret);
		goto err_reg_input_dev;
	}

	i2c_set_clientdata(client, info);

	if (info->pdata->gpio_led_en) {
		ret = gpio_request(info->pdata->gpio_led_en,
						"gpio_touchkey_led");
		if (ret < 0) {
			dev_err(&client->dev,
				"gpio_touchkey_led gpio_request is failed\n");
			goto err_gpio_request;
		}
		gpio_tlmm_config(GPIO_CFG(info->pdata->gpio_led_en, 0,
			GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), 1);

		cypress_touchkey_con_hw(info, true);
	}

	#ifdef USE_OPEN_CLOSE
	input_dev->open = cypress_input_open;
	input_dev->close = cypress_input_close;
	#endif

	dev_info(&info->client->dev, "gpio_to_irq IRQ %d\n",
			client->irq);

	ret = request_threaded_irq(client->irq, NULL,
			cypress_touchkey_interrupt,
			IRQF_TRIGGER_FALLING, client->dev.driver->name, info);
	if (ret < 0) {
		dev_err(&client->dev, "Failed to request IRQ %d (err: %d).\n",
				client->irq, ret);
		goto err_req_irq;
	}

#ifdef CONFIG_HAS_EARLYSUSPEND
		info->early_suspend.suspend = cypress_touchkey_early_suspend;
		info->early_suspend.resume = cypress_touchkey_late_resume;
		register_early_suspend(&info->early_suspend);
#endif /* CONFIG_HAS_EARLYSUSPEND */

	info->led_wq = create_singlethread_workqueue("cypress_touchkey");
	INIT_WORK(&info->led_work, cypress_touchkey_led_work);

	info->leds.name = TOUCHKEY_BACKLIGHT;
	info->leds.brightness = LED_FULL;
	info->leds.max_brightness = LED_FULL;
	info->leds.brightness_set = cypress_touchkey_brightness_set;

	ret = led_classdev_register(&client->dev, &info->leds);
	if (ret)
		goto err_req_irq;

#if defined(CONFIG_LCD_CONNECTION_CHECK)	//for SMD test
	if (is_lcd_attached() == 0) {
		disable_irq(client->irq);
		printk("[TSK] %s : is_lcd_attached()=0 \n",__func__);
	
	}
	else{
#endif
	msleep(20);
	ic_fw_ver = i2c_smbus_read_byte_data(client, CYPRESS_FW_VER);
	dev_err(&client->dev, "Touchkey FW Version: 0x%02x\n", ic_fw_ver);

#if defined(CONFIG_MACH_M2_ATT) || defined(CONFIG_MACH_M2_DCM) \
	|| defined(CONFIG_MACH_M2_SKT) || defined(CONFIG_MACH_M2_KDI)
	dev_err(&client->dev, "Touchkey FW Version: 0x%02x, system_rev: %x\n",
						ic_fw_ver, system_rev);
	if (0 /* ic_fw_ver < BIN_FW_VERSION */) {
		dev_err(&client->dev, "[TOUCHKEY] touchkey_update Start!!\n");
		disable_irq(client->irq);

		while (retry--) {
			if (ISSP_main() == 0) {
				dev_err(&client->dev, "[TOUCHKEY] Update success!\n");
				enable_irq(client->irq);
				break;
			}
			dev_err(&client->dev,
				"[TOUCHKEY] Touchkey_update failed... retry...\n");
		}

		if (retry <= 0) {
			if (info->pdata->gpio_led_en)
				cypress_touchkey_con_hw(info, false);
			msleep(300);
			dev_err(&client->dev, "[TOUCHKEY]Touchkey_update fail\n");
		}

		msleep(500);

		ic_fw_ver = i2c_smbus_read_byte_data(info->client,
				CYPRESS_FW_VER);
		dev_err(&client->dev,
			"[TouchKey] %s : FW Ver 0x%02x\n", __func__, ic_fw_ver);
	} else {
		dev_err(&client->dev, "[TouchKey] FW update does not need!\n");
	}
#endif
	cypress_touchkey_auto_cal(info);
#if defined(CONFIG_LCD_CONNECTION_CHECK)	//for SMD test
	}
#endif	
	sec_touchkey = device_create(sec_class, NULL, 0, NULL, "sec_touchkey");
	if (IS_ERR(sec_touchkey)) {
		pr_err("Failed to create device(sec_touchkey)!\n");
		goto err_sysfs;
	}
	dev_set_drvdata(sec_touchkey, info);


	if (device_create_file(sec_touchkey,
			&dev_attr_touchkey_firm_update_status) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_firm_update.attr.name);
		goto err_sysfs;
	}

	if (device_create_file(sec_touchkey,
			&dev_attr_touchkey_firm_update) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_firm_update.attr.name);
		goto err_sysfs;
	}

	if (device_create_file(sec_touchkey,
			&dev_attr_touchkey_firm_version_panel) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_firm_version_panel.attr.name);
		goto err_sysfs;
	}

	if (device_create_file(sec_touchkey,
			&dev_attr_touchkey_firm_version_phone) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_firm_version_phone.attr.name);
		goto err_sysfs;
	}

	if (device_create_file(sec_touchkey,
			&dev_attr_touchkey_brightness) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_brightness.attr.name);
		goto err_sysfs;
	}

	if (device_create_file(sec_touchkey,
			&dev_attr_touch_sensitivity) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touch_sensitivity.attr.name);
		goto err_sysfs;
	}

	if (device_create_file(sec_touchkey,
			&dev_attr_touchkey_menu) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_menu.attr.name);
		goto err_sysfs;
	}

	if (device_create_file(sec_touchkey,
			&dev_attr_touchkey_back) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_back.attr.name);
		goto err_sysfs;
	}
#ifdef HOME_KEY_USE
	if (device_create_file(sec_touchkey,
			&dev_attr_touchkey_home) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_home.attr.name);
		goto err_sysfs;
	}
#endif

	if (device_create_file(sec_touchkey,
			&dev_attr_touchkey_raw_data0) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_raw_data0.attr.name);
		goto err_sysfs;
	}

	if (device_create_file(sec_touchkey,
			&dev_attr_touchkey_raw_data1) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_raw_data1.attr.name);
		goto err_sysfs;
	}
#ifdef HOME_KEY_USE
	if (device_create_file(sec_touchkey,
			&dev_attr_touchkey_raw_data2) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_raw_data2.attr.name);
		goto err_sysfs;
	}
#endif
	if (device_create_file(sec_touchkey, &dev_attr_touchkey_idac0) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_idac0.attr.name);
		goto err_sysfs;
	}

	if (device_create_file(sec_touchkey, &dev_attr_touchkey_idac1) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_idac1.attr.name);
		goto err_sysfs;
	}
#ifdef HOME_KEY_USE
	if (device_create_file(sec_touchkey, &dev_attr_touchkey_idac2) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_idac2.attr.name);
		goto err_sysfs;
	}
#endif
	if (device_create_file(sec_touchkey,
			&dev_attr_touchkey_threshold) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_threshold.attr.name);
		goto err_sysfs;
	}

	if (device_create_file(sec_touchkey,
			&dev_attr_touchkey_autocal_start) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_touchkey_autocal_start.attr.name);
		goto err_sysfs;
	}

	if (device_create_file(sec_touchkey,
			&dev_attr_autocal_enable) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_autocal_enable.attr.name);
		goto err_sysfs;
	}

	if (device_create_file(sec_touchkey,
			&dev_attr_autocal_stat) < 0) {
		pr_err("Failed to create device file(%s)!\n",
			dev_attr_autocal_stat.attr.name);
		goto err_sysfs;
	}

	if (device_create_file(sec_touchkey,
		&dev_attr_touchkey_brightness_level) < 0) {
		printk(KERN_ERR "Failed to create device file(%s)!\n",
		dev_attr_touchkey_brightness_level.attr.name);
		goto err_sysfs;
	}
	info->is_powering_on = false;
	return 0;

err_req_irq:
err_gpio_request:
	input_unregister_device(input_dev);
	input_dev = NULL;	/* unregister already dropped the reference */
err_reg_input_dev:
	input_free_device(input_dev);
	mutex_destroy(&info->touchkey_mutex);
err_input_dev_alloc:
	kfree(info);
err_sysfs:
	return -ENXIO;
err_mem_alloc:
	return ret;

}
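The long run of near-identical device_create_file() blocks in this probe is commonly collapsed into a loop over an attribute table. A userspace sketch of that shape; the callback and names are hypothetical, standing in for the sysfs attribute array a cleaned-up driver would use.

#include <stdio.h>

typedef int (*create_fn)(const char *name);

static int fake_create(const char *name)
{
	printf("created %s\n", name);
	return 0;
}

static const char *attr_names[] = {
	"touchkey_firm_update_status",
	"touchkey_firm_update",
	"touchkey_brightness",
	"touch_sensitivity",
};

static int create_all(create_fn create)
{
	for (unsigned int i = 0; i < sizeof(attr_names) / sizeof(attr_names[0]); i++) {
		if (create(attr_names[i]) < 0) {
			fprintf(stderr, "Failed to create device file(%s)!\n",
				attr_names[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return create_all(fake_create) ? 1 : 0;
}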
Example #22
void rt2x00lib_dmastart(struct queue_entry *entry)
{
	set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
	rt2x00queue_index_inc(entry, Q_INDEX);
}
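rt2x00lib_dmastart() above, and rt2x00lib_dmadone() in Example #24 below, hand a queue entry to the device and back by flipping one ownership bit with atomic bitops. A minimal C11 sketch of that handoff; the bit index and names are stand-ins for the kernel's set_bit()/clear_bit() on entry->flags.

#include <stdatomic.h>
#include <stdio.h>

#define ENTRY_OWNER_DEVICE	0	/* bit index, illustrative */

static atomic_ulong flags;

static void dma_start(void)
{
	/* hand the entry to the device: set the owner bit atomically */
	atomic_fetch_or(&flags, 1UL << ENTRY_OWNER_DEVICE);
}

static void dma_done(void)
{
	/* take the entry back: clear the owner bit atomically */
	atomic_fetch_and(&flags, ~(1UL << ENTRY_OWNER_DEVICE));
}

int main(void)
{
	dma_start();
	printf("device owns entry: %lu\n",
	       (atomic_load(&flags) >> ENTRY_OWNER_DEVICE) & 1);
	dma_done();
	printf("device owns entry: %lu\n",
	       (atomic_load(&flags) >> ENTRY_OWNER_DEVICE) & 1);
	return 0;
}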
Example #23
int usbnet_open (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			retval;
	struct driver_info	*info = dev->driver_info;

	if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "resumption fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done_nopm;
	}

	// put into "known safe" state
	if (info->reset && (retval = info->reset (dev)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "open reset fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done;
	}

	// insist peer be connected
	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
		netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
		goto done;
	}

	/* start any status interrupt transfer */
	if (dev->interrupt) {
		retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
		if (retval < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", retval);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_start_queue (net);
	netif_info(dev, ifup, dev->net,
		   "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
		   (int)RX_QLEN(dev), (int)TX_QLEN(dev),
		   dev->net->mtu,
		   (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
		   (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
		   (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
		   (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
		   "simple");

	// delay posting reads until we're fully open
	tasklet_schedule (&dev->bh);
	if (info->manage_power) {
		retval = info->manage_power(dev, 1);
		if (retval < 0)
			goto done;
		usb_autopm_put_interface(dev->intf);
	}
	return retval;

done:
	usb_autopm_put_interface(dev->intf);
done_nopm:
	return retval;
}
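usbnet_open() splits its exits into done (paths that ran after the runtime-PM get, so they must put) and done_nopm (paths before it, which must not). A simplified sketch of that two-label balance; pm_get()/pm_put() are placeholders for the USB autopm calls, and the success-path handling is reduced.

#include <stdio.h>

static int pm_get(void)
{
	printf("pm get\n");
	return 0;
}

static void pm_put(void)
{
	printf("pm put\n");
}

static int device_open(int fail_reset)
{
	int retval = pm_get();

	if (retval < 0)
		goto done_nopm;		/* nothing acquired yet, no put */

	if (fail_reset) {
		retval = -1;
		goto done;		/* must release the PM reference */
	}
	printf("open ok\n");
done:
	pm_put();
done_nopm:
	return retval;
}

int main(void)
{
	device_open(1);
	device_open(0);
	return 0;
}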
Example #24
void rt2x00lib_dmadone(struct queue_entry *entry)
{
	set_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags);
	clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
	rt2x00queue_index_inc(entry, Q_INDEX_DMA_DONE);
}
Example #25
static int btusb_probe(struct usb_interface *intf,
				const struct usb_device_id *id)
{
	struct usb_endpoint_descriptor *ep_desc;
	struct btusb_data *data;
	struct hci_dev *hdev;
	int i, err;

	BT_DBG("intf %p id %p", intf, id);

	/* interface numbers are hardcoded in the spec */
	if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
		return -ENODEV;

	if (!id->driver_info) {
		const struct usb_device_id *match;
		match = usb_match_id(intf, blacklist_table);
		if (match)
			id = match;
	}

	if (id->driver_info == BTUSB_IGNORE)
		return -ENODEV;

	if (ignore_dga && id->driver_info & BTUSB_DIGIANSWER)
		return -ENODEV;

	if (ignore_csr && id->driver_info & BTUSB_CSR)
		return -ENODEV;

	if (ignore_sniffer && id->driver_info & BTUSB_SNIFFER)
		return -ENODEV;

	if (id->driver_info & BTUSB_ATH3012) {
		struct usb_device *udev = interface_to_usbdev(intf);

		/* Old firmware would otherwise let ath3k driver load
		 * patch and sysconfig files */
		if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001)
			return -ENODEV;
	}

	data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
		ep_desc = &intf->cur_altsetting->endpoint[i].desc;

		if (!data->intr_ep && usb_endpoint_is_int_in(ep_desc)) {
			data->intr_ep = ep_desc;
			continue;
		}

		if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) {
			data->bulk_tx_ep = ep_desc;
			continue;
		}

		if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) {
			data->bulk_rx_ep = ep_desc;
			continue;
		}
	}

	if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep)
		return -ENODEV;

	data->cmdreq_type = USB_TYPE_CLASS;

	data->udev = interface_to_usbdev(intf);
	data->intf = intf;

	spin_lock_init(&data->lock);

	INIT_WORK(&data->work, btusb_work);
	INIT_WORK(&data->waker, btusb_waker);
	spin_lock_init(&data->txlock);

	init_usb_anchor(&data->tx_anchor);
	init_usb_anchor(&data->intr_anchor);
	init_usb_anchor(&data->bulk_anchor);
	init_usb_anchor(&data->isoc_anchor);
	init_usb_anchor(&data->deferred);

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hci_set_drvdata(hdev, data);

	data->hdev = hdev;

	SET_HCIDEV_DEV(hdev, &intf->dev);

	hdev->open   = btusb_open;
	hdev->close  = btusb_close;
	hdev->flush  = btusb_flush;
	hdev->send   = btusb_send_frame;
	hdev->notify = btusb_notify;

	if (id->driver_info & BTUSB_BCM92035)
		hdev->setup = btusb_setup_bcm92035;

	if (id->driver_info & BTUSB_INTEL)
		hdev->setup = btusb_setup_intel;

	/* Interface numbers are hardcoded in the specification */
	data->isoc = usb_ifnum_to_if(data->udev, 1);

	if (!reset)
		set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);

	if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
		if (!disable_scofix)
			set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
	}

	if (id->driver_info & BTUSB_BROKEN_ISOC)
		data->isoc = NULL;

	if (id->driver_info & BTUSB_DIGIANSWER) {
		data->cmdreq_type = USB_TYPE_VENDOR;
		set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
	}

	if (id->driver_info & BTUSB_CSR) {
		struct usb_device *udev = data->udev;

		/* Old firmware would otherwise execute USB reset */
		if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117)
			set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
	}

	if (id->driver_info & BTUSB_SNIFFER) {
		struct usb_device *udev = data->udev;

		/* New sniffer firmware has crippled HCI interface */
		if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
			set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);

		data->isoc = NULL;
	}

	if (data->isoc) {
		err = usb_driver_claim_interface(&btusb_driver,
							data->isoc, data);
		if (err < 0) {
			hci_free_dev(hdev);
			return err;
		}
	}

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	usb_set_intfdata(intf, data);

	return 0;
}
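The endpoint loop in btusb_probe() claims each descriptor slot the first time its type appears, then fails with -ENODEV if any required slot is still empty. A compact sketch of that first-match classification; the types are invented stand-ins for the USB descriptor checks.

#include <stdio.h>

enum ep_type { EP_INT_IN, EP_BULK_OUT, EP_BULK_IN, EP_OTHER };

static int classify(const enum ep_type *eps, int n)
{
	const enum ep_type *intr = NULL, *tx = NULL, *rx = NULL;

	for (int i = 0; i < n; i++) {
		if (!intr && eps[i] == EP_INT_IN) {
			intr = &eps[i];
			continue;
		}
		if (!tx && eps[i] == EP_BULK_OUT) {
			tx = &eps[i];
			continue;
		}
		if (!rx && eps[i] == EP_BULK_IN) {
			rx = &eps[i];
			continue;
		}
	}
	if (!intr || !tx || !rx)
		return -1;	/* mirrors the -ENODEV check above */
	return 0;
}

int main(void)
{
	enum ep_type eps[] = { EP_INT_IN, EP_BULK_OUT, EP_BULK_IN };

	printf("classify: %d\n", classify(eps, 3));
	return 0;
}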
Example #26
static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
{
	struct hw_mode_spec *spec = &rt2x00dev->spec;
	int status;

	if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags))
		return 0;

	/*
	 * Initialize HW modes.
	 */
	status = rt2x00lib_probe_hw_modes(rt2x00dev, spec);
	if (status)
		return status;

	/*
	 * Initialize HW fields.
	 */
	rt2x00dev->hw->queues = rt2x00dev->ops->tx_queues;

	/*
	 * Initialize extra TX headroom required.
	 */
	rt2x00dev->hw->extra_tx_headroom =
		max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM,
		      rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Take TX headroom required for alignment into account.
	 */
	if (test_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags))
		rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE;
	else if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
		rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE;

	/*
	 * Tell mac80211 about the size of our private STA structure.
	 */
	rt2x00dev->hw->sta_data_size = sizeof(struct rt2x00_sta);

	/*
	 * Allocate tx status FIFO for driver use.
	 */
	if (test_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags)) {
		/*
		 * Allocate the txstatus fifo. In the worst case the tx
		 * status fifo has to hold the tx status of all entries
		 * in all tx queues. Hence, calculate the kfifo size as
		 * tx_queues * entry_num and round up to the nearest
		 * power of 2.
		 */
		int kfifo_size =
			roundup_pow_of_two(rt2x00dev->ops->tx_queues *
					   rt2x00dev->ops->tx->entry_num *
					   sizeof(u32));

		status = kfifo_alloc(&rt2x00dev->txstatus_fifo, kfifo_size,
				     GFP_KERNEL);
		if (status)
			return status;
	}

	/*
	 * Initialize tasklets if used by the driver. Tasklets are
	 * disabled until the interrupts are turned on. The driver
	 * has to handle that.
	 */
#define RT2X00_TASKLET_INIT(taskletname) \
	if (rt2x00dev->ops->lib->taskletname) { \
		tasklet_init(&rt2x00dev->taskletname, \
			     rt2x00dev->ops->lib->taskletname, \
			     (unsigned long)rt2x00dev); \
	}

	RT2X00_TASKLET_INIT(txstatus_tasklet);
	RT2X00_TASKLET_INIT(pretbtt_tasklet);
	RT2X00_TASKLET_INIT(tbtt_tasklet);
	RT2X00_TASKLET_INIT(rxdone_tasklet);
	RT2X00_TASKLET_INIT(autowake_tasklet);

#undef RT2X00_TASKLET_INIT

	/*
	 * Register HW.
	 */
	status = ieee80211_register_hw(rt2x00dev->hw);
	if (status)
		return status;

	set_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags);

	return 0;
}
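The txstatus FIFO comment above sizes the kfifo as tx_queues * entry_num * sizeof(u32) rounded up to the next power of two. A plain-C sketch of that rounding for nonzero 32-bit values; the kernel's real helper is roundup_pow_of_two().

#include <stdio.h>
#include <stdint.h>

/* Round v up to the next power of two; v must be nonzero. */
static uint32_t roundup_pow_of_two32(uint32_t v)
{
	v--;		/* so exact powers of two stay unchanged */
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}

int main(void)
{
	uint32_t tx_queues = 4, entry_num = 13;	/* illustrative numbers */
	uint32_t size = roundup_pow_of_two32(tx_queues * entry_num *
					     (uint32_t)sizeof(uint32_t));

	printf("kfifo size: %u\n", size);	/* 4 * 13 * 4 = 208 -> 256 */
	return 0;
}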
Example #27
static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct atmsvc_msg *msg;
	struct atm_vcc *session_vcc;
	struct sock *sk;

	msg = (struct atmsvc_msg *) skb->data;
	atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
	vcc = *(struct atm_vcc **) &msg->vcc;
	pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
	sk = sk_atm(vcc);

	switch (msg->type) {
	case as_okay:
		sk->sk_err = -msg->reply;
		clear_bit(ATM_VF_WAITING, &vcc->flags);
		if (!*vcc->local.sas_addr.prv && !*vcc->local.sas_addr.pub) {
			vcc->local.sas_family = AF_ATMSVC;
			memcpy(vcc->local.sas_addr.prv,
			       msg->local.sas_addr.prv, ATM_ESA_LEN);
			memcpy(vcc->local.sas_addr.pub,
			       msg->local.sas_addr.pub, ATM_E164_LEN + 1);
		}
		session_vcc = vcc->session ? vcc->session : vcc;
		if (session_vcc->vpi || session_vcc->vci)
			break;
		session_vcc->itf = msg->pvc.sap_addr.itf;
		session_vcc->vpi = msg->pvc.sap_addr.vpi;
		session_vcc->vci = msg->pvc.sap_addr.vci;
		if (session_vcc->vpi || session_vcc->vci)
			session_vcc->qos = msg->qos;
		break;
	case as_error:
		clear_bit(ATM_VF_REGIS, &vcc->flags);
		clear_bit(ATM_VF_READY, &vcc->flags);
		sk->sk_err = -msg->reply;
		clear_bit(ATM_VF_WAITING, &vcc->flags);
		break;
	case as_indicate:
		vcc = *(struct atm_vcc **)&msg->listen_vcc;
		sk = sk_atm(vcc);
		pr_debug("as_indicate!!!\n");
		lock_sock(sk);
		if (sk_acceptq_is_full(sk)) {
			sigd_enq(NULL, as_reject, vcc, NULL, NULL);
			dev_kfree_skb(skb);
			goto as_indicate_complete;
		}
		sk->sk_ack_backlog++;
		skb_queue_tail(&sk->sk_receive_queue, skb);
		pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk));
		sk->sk_state_change(sk);
as_indicate_complete:
		release_sock(sk);
		return 0;
	case as_close:
		set_bit(ATM_VF_RELEASED, &vcc->flags);
		vcc_release_async(vcc, msg->reply);
		goto out;
	case as_modify:
		modify_qos(vcc, msg);
		break;
	case as_addparty:
	case as_dropparty:
		sk->sk_err_soft = -msg->reply;
					/* < 0 failure, otherwise ep_ref */
		clear_bit(ATM_VF_WAITING, &vcc->flags);
		break;
	default:
		pr_alert("bad message type %d\n", (int)msg->type);
		return -EINVAL;
	}
	sk->sk_state_change(sk);
out:
	dev_kfree_skb(skb);
	return 0;
}
Example #28
/*
 * check that a dentry lookup hit has found a valid entry
 * - NOTE! the hit can be a negative hit too, so we can't assume we have an
 *   inode
 */
static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct afs_vnode *vnode, *dir;
	struct afs_fid fid;
	struct dentry *parent;
	struct key *key;
	void *dir_version;
	int ret;

	vnode = AFS_FS_I(dentry->d_inode);

	if (dentry->d_inode)
		_enter("{v={%x:%u} n=%s fl=%lx},",
		       vnode->fid.vid, vnode->fid.vnode, dentry->d_name.name,
		       vnode->flags);
	else
		_enter("{neg n=%s}", dentry->d_name.name);

	key = afs_request_key(AFS_FS_S(dentry->d_sb)->volume->cell);
	if (IS_ERR(key))
		key = NULL;

	/* lock down the parent dentry so we can peer at it */
	parent = dget_parent(dentry);
	if (!parent->d_inode)
		goto out_bad;

	dir = AFS_FS_I(parent->d_inode);

	/* validate the parent directory */
	if (test_bit(AFS_VNODE_MODIFIED, &dir->flags))
		afs_validate(dir, key);

	if (test_bit(AFS_VNODE_DELETED, &dir->flags)) {
		_debug("%s: parent dir deleted", dentry->d_name.name);
		goto out_bad;
	}

	dir_version = (void *) (unsigned long) dir->status.data_version;
	if (dentry->d_fsdata == dir_version)
		goto out_valid; /* the dir contents are unchanged */

	_debug("dir modified");

	/* search the directory for this vnode */
	ret = afs_do_lookup(&dir->vfs_inode, dentry, &fid, key);
	switch (ret) {
	case 0:
		/* the filename maps to something */
		if (!dentry->d_inode)
			goto out_bad;
		if (is_bad_inode(dentry->d_inode)) {
			printk("kAFS: afs_d_revalidate: %s/%s has bad inode\n",
			       parent->d_name.name, dentry->d_name.name);
			goto out_bad;
		}

		/* if the vnode ID has changed, then the dirent points to a
		 * different file */
		if (fid.vnode != vnode->fid.vnode) {
			_debug("%s: dirent changed [%u != %u]",
			       dentry->d_name.name, fid.vnode,
			       vnode->fid.vnode);
			goto not_found;
		}

		/* if the vnode ID uniqifier has changed, then the file has
		 * been deleted and replaced, and the original vnode ID has
		 * been reused */
		if (fid.unique != vnode->fid.unique) {
			_debug("%s: file deleted (uq %u -> %u I:%llu)",
			       dentry->d_name.name, fid.unique,
			       vnode->fid.unique,
			       (unsigned long long)dentry->d_inode->i_version);
			spin_lock(&vnode->lock);
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			spin_unlock(&vnode->lock);
			goto not_found;
		}
		goto out_valid;

	case -ENOENT:
		/* the filename is unknown */
		_debug("%s: dirent not found", dentry->d_name.name);
		if (dentry->d_inode)
			goto not_found;
		goto out_valid;

	default:
		_debug("failed to iterate dir %s: %d",
		       parent->d_name.name, ret);
		goto out_bad;
	}

out_valid:
	dentry->d_fsdata = dir_version;
out_skip:
	dput(parent);
	key_put(key);
	_leave(" = 1 [valid]");
	return 1;

	/* the dirent, if it exists, now points to a different vnode */
not_found:
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_NFSFS_RENAMED;
	spin_unlock(&dentry->d_lock);

out_bad:
	if (dentry->d_inode) {
		/* don't unhash if we have submounts */
		if (have_submounts(dentry))
			goto out_skip;
	}

	_debug("dropping dentry %s/%s",
	       parent->d_name.name, dentry->d_name.name);
	shrink_dcache_parent(dentry);
	d_drop(dentry);
	dput(parent);
	key_put(key);

	_leave(" = 0 [bad]");
	return 0;
}
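afs_d_revalidate() caches the directory's data version in dentry->d_fsdata and skips the expensive directory search when the cookie is unchanged. A simplified sketch of that version-cookie shortcut with invented structures; note the real code only refreshes the cookie after a successful revalidation.

#include <stdio.h>
#include <stdint.h>

struct dir {
	uint64_t data_version;
};

struct dent {
	void *cached_version;	/* plays the role of d_fsdata */
};

/* Return 1 when the cached cookie matches (lookup can be skipped),
 * otherwise refresh the cookie and report a miss. */
static int revalidate_fast(struct dent *d, const struct dir *dir)
{
	void *v = (void *)(uintptr_t)dir->data_version;

	if (d->cached_version == v)
		return 1;	/* directory contents unchanged */
	d->cached_version = v;
	return 0;
}

int main(void)
{
	struct dir dir = { 42 };
	struct dent d = { 0 };

	printf("hit=%d\n", revalidate_fast(&d, &dir));	/* miss */
	printf("hit=%d\n", revalidate_fast(&d, &dir));	/* hit */
	return 0;
}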
Example #29
/* pasemi_dma_free_flag - Deallocates a flag (event)
 * @flag: Flag number to deallocate
 *
 * Frees up a flag so it can be reused for other purposes.
 */
void pasemi_dma_free_flag(int flag)
{
	BUG_ON(flag >= MAX_FLAGS);	/* range-check before touching the bitmap */
	BUG_ON(test_bit(flag, flags_free));
	set_bit(flag, flags_free);
}
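pasemi_dma_free_flag() returns a flag number to a free-set bitmap. Below is a userspace sketch of the matching allocate/free pair over a plain array; the MAX_FLAGS value, the byte-per-flag representation, and the absence of locking are all simplifications (the kernel uses atomic bitops on a real bitmap).

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define MAX_FLAGS 64

static unsigned char flags_free[MAX_FLAGS];	/* 1 = free, 0 = in use */

static int alloc_flag(void)
{
	for (int i = 0; i < MAX_FLAGS; i++) {
		if (flags_free[i]) {
			flags_free[i] = 0;
			return i;
		}
	}
	return -1;	/* none free */
}

static void free_flag(int flag)
{
	assert(flag >= 0 && flag < MAX_FLAGS);	/* range-check first */
	assert(!flags_free[flag]);		/* catch double free */
	flags_free[flag] = 1;
}

int main(void)
{
	int f;

	memset(flags_free, 1, sizeof(flags_free));	/* all free */
	f = alloc_flag();
	printf("got flag %d\n", f);
	free_flag(f);
	return 0;
}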
Example #30
static int
startup(struct async_struct *info)
{
	unsigned long flags;
	int retval = 0;
	irq_handler_t handler;
	struct serial_state *state = info->state;
	unsigned long page;

	page = get_zeroed_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	local_irq_save(flags);

	if (info->flags & ASYNC_INITIALIZED) {
		free_page(page);
		goto errout;
	}

	if (!state->port || !state->type) {
		if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags);
		free_page(page);
		goto errout;
	}
	if (info->xmit.buf)
		free_page(page);
	else
		info->xmit.buf = (unsigned char *) page;

#ifdef SIMSERIAL_DEBUG
	printk("startup: ttys%d (irq %d)...", info->line, state->irq);
#endif

	/*
	 * Allocate the IRQ if necessary
	 */
	if (state->irq && (!IRQ_ports[state->irq] ||
			  !IRQ_ports[state->irq]->next_port)) {
		if (IRQ_ports[state->irq]) {
			retval = -EBUSY;
			goto errout;
		} else
			handler = rs_interrupt_single;

		retval = request_irq(state->irq, handler, IRQ_T(info), "simserial", NULL);
		if (retval) {
			if (capable(CAP_SYS_ADMIN)) {
				if (info->tty)
					set_bit(TTY_IO_ERROR,
						&info->tty->flags);
				retval = 0;
			}
			goto errout;
		}
	}

	/*
	 * Insert serial port into IRQ chain.
	 */
	info->prev_port = NULL;
	info->next_port = IRQ_ports[state->irq];
	if (info->next_port)
		info->next_port->prev_port = info;
	IRQ_ports[state->irq] = info;

	if (info->tty) clear_bit(TTY_IO_ERROR, &info->tty->flags);

	info->xmit.head = info->xmit.tail = 0;

#if 0
	/*
	 * Set up serial timers...
	 */
	timer_table[RS_TIMER].expires = jiffies + 2*HZ/100;
	timer_active |= 1 << RS_TIMER;
#endif

	/*
	 * Set up the tty->alt_speed kludge
	 */
	if (info->tty) {
		if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
			info->tty->alt_speed = 57600;
		if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
			info->tty->alt_speed = 115200;
		if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
			info->tty->alt_speed = 230400;
		if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
			info->tty->alt_speed = 460800;
	}

	info->flags |= ASYNC_INITIALIZED;
	local_irq_restore(flags);
	return 0;

errout:
	local_irq_restore(flags);
	return retval;
}
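The chain of ASYNC_SPD_* comparisons at the end of startup() maps one flag field to an alternate speed; the same mapping reads more directly as a switch over the masked field. A sketch with placeholder constants (the kernel's actual ASYNC_SPD_* encodings differ).

#include <stdio.h>

#define SPD_MASK	0x0070	/* illustrative mask and flag encodings */
#define SPD_HI		0x0010
#define SPD_VHI		0x0020
#define SPD_SHI		0x0030
#define SPD_WARP	0x0040

static int alt_speed(unsigned int flags)
{
	switch (flags & SPD_MASK) {
	case SPD_HI:	return 57600;
	case SPD_VHI:	return 115200;
	case SPD_SHI:	return 230400;
	case SPD_WARP:	return 460800;
	default:	return 0;	/* no speed kludge requested */
	}
}

int main(void)
{
	printf("alt speed: %d\n", alt_speed(SPD_VHI));
	return 0;
}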