Code example #1
static int s3c24xx_pcm_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct s3c24xx_runtime_data *prtd = runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct s3c24xx_pcm_dma_params *dma = rtd->dai->cpu_dai->dma_data;
	unsigned long totbytes;
	int ret = 0;

	s3cdbg("Entered %s, params = %p\n", __func__, prtd->params);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		totbytes = params_buffer_bytes(params) * ANDROID_BUF_NUM;
	else
		totbytes = params_buffer_bytes(params);

	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!dma)
		return 0;

	/* this may get called several times by oss emulation
	 * with different params */
	if (prtd->params == NULL) {
		prtd->params = dma;
		s3cdbg("params %p, client %p, channel %d\n", prtd->params,
			prtd->params->client, prtd->params->channel);

		/* prepare DMA */
		ret = s3c2410_dma_request(prtd->params->channel,
					  prtd->params->client, NULL);

		if (ret) {
			printk(KERN_ERR "failed to get dma channel\n");
			return ret;
		}
	} else if (prtd->params != dma) {
		s3c2410_dma_free(prtd->params->channel, prtd->params->client);
		prtd->params = dma;
		s3cdbg("params %p, client %p, channel %d\n", prtd->params,
			prtd->params->client, prtd->params->channel);

		/* prepare DMA */
		ret = s3c2410_dma_request(prtd->params->channel,
					  prtd->params->client, NULL);

		if (ret) {
			printk(KERN_ERR "failed to get dma channel\n");
			return ret;
		}
	}

	/* channel needs configuring for mem=>device, increment memory addr,
	 * sync to pclk, half-word transfers to the IIS-FIFO. */
#if !defined (CONFIG_CPU_S3C6400) && !defined (CONFIG_CPU_S3C6410)  && !defined(CONFIG_CPU_S5PC100) && !defined (CONFIG_CPU_S5P6440)
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		s3c2410_dma_devconfig(prtd->params->channel,
				S3C2410_DMASRC_MEM, S3C2410_DISRCC_INC |
				S3C2410_DISRCC_APB, prtd->params->dma_addr);

		s3c2410_dma_config(prtd->params->channel,
				prtd->params->dma_size,
				S3C2410_DCON_SYNC_PCLK | 
				S3C2410_DCON_HANDSHAKE);
	} else {
		s3c2410_dma_config(prtd->params->channel,
				prtd->params->dma_size,
				S3C2410_DCON_HANDSHAKE | 
				S3C2410_DCON_SYNC_PCLK);

		s3c2410_dma_devconfig(prtd->params->channel,
					S3C2410_DMASRC_HW, 0x3,
					prtd->params->dma_addr);
	}

#else
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		s3c2410_dma_devconfig(prtd->params->channel,
				S3C2410_DMASRC_MEM, 0,
				prtd->params->dma_addr);

		s3c2410_dma_config(prtd->params->channel,
				prtd->params->dma_size, 0);
	} else {
		s3c2410_dma_devconfig(prtd->params->channel,
				S3C2410_DMASRC_HW, 0,
				prtd->params->dma_addr);		

		s3c2410_dma_config(prtd->params->channel,
				prtd->params->dma_size, 0);
	}
#endif

	s3c2410_dma_set_buffdone_fn(prtd->params->channel,
				    s3c24xx_audio_buffdone);

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	runtime->dma_bytes = totbytes;

	spin_lock_irq(&prtd->lock);
	prtd->dma_limit = runtime->hw.periods_min;
	prtd->dma_period = params_period_bytes(params);
	prtd->dma_start = runtime->dma_addr;
	prtd->dma_pos = prtd->dma_start;
	prtd->dma_end = prtd->dma_start + totbytes;
	spin_unlock_irq(&prtd->lock);

	s3cdbg("Entered %s, line %d \n", __FUNCTION__, __LINE__);
	return 0;
}
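
Note: this handler runs in process context with interrupts enabled, which is what makes the bare spin_lock_irq()/spin_unlock_irq() pair around the dma_* bookkeeping safe. A minimal sketch of the contrast with spin_lock_irqsave(), reusing the prtd fields from the example above (illustrative only, not part of the driver):

static void prtd_update_sketch(struct s3c24xx_runtime_data *prtd,
			       dma_addr_t start, unsigned long totbytes)
{
	unsigned long flags;

	/* spin_lock_irq(): caller must know interrupts are enabled,
	 * because the unlock unconditionally re-enables them. */
	spin_lock_irq(&prtd->lock);
	prtd->dma_start = start;
	prtd->dma_end = start + totbytes;
	spin_unlock_irq(&prtd->lock);

	/* spin_lock_irqsave(): safe when the interrupt state is
	 * unknown; the previous state is restored on unlock. */
	spin_lock_irqsave(&prtd->lock, flags);
	prtd->dma_pos = prtd->dma_start;
	spin_unlock_irqrestore(&prtd->lock, flags);
}
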
Code example #2
File: iwctl.c  Project: mbgg/linux
/*
 * Wireless Handler: set encode mode
 */
int iwctl_siwencode(struct net_device *dev, struct iw_request_info *info,
		union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct iw_point *wrq = &wrqu->encoding;
	u32 dwKeyIndex = (u32)(wrq->flags & IW_ENCODE_INDEX);
	int ii;
	int uu;
	int rc = 0;
	int index = (wrq->flags & IW_ENCODE_INDEX);

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWENCODE\n");

	if (pMgmt == NULL)
		return -EFAULT;

	// Check the size of the key
	if (wrq->length > WLAN_WEP232_KEYLEN) {
		rc = -EINVAL;
		return rc;
	}

	if (dwKeyIndex > WLAN_WEP_NKEYS) {
		rc = -EINVAL;
		return rc;
	}

	if (dwKeyIndex > 0)
		dwKeyIndex--;

	// Send the key to the card
	if (wrq->length > 0) {
		if (wrq->length == WLAN_WEP232_KEYLEN) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set 232 bit wep key\n");
		} else if (wrq->length == WLAN_WEP104_KEYLEN) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set 104 bit wep key\n");
		} else if (wrq->length == WLAN_WEP40_KEYLEN) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set 40 bit wep key, index= %d\n", (int)dwKeyIndex);
		}
		memset(pDevice->abyKey, 0, WLAN_WEP232_KEYLEN);
		memcpy(pDevice->abyKey, extra, wrq->length);

		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"abyKey: ");
		for (ii = 0; ii < wrq->length; ii++)
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%02x ", pDevice->abyKey[ii]);

		if (pDevice->flags & DEVICE_FLAGS_OPENED) {
			spin_lock_irq(&pDevice->lock);
			KeybSetDefaultKey(pDevice,
					&(pDevice->sKey),
					dwKeyIndex | (1 << 31),
					wrq->length, NULL,
					pDevice->abyKey,
					KEY_CTL_WEP);
			spin_unlock_irq(&pDevice->lock);
		}
		pDevice->byKeyIndex = (u8)dwKeyIndex;
		pDevice->uKeyLength = wrq->length;
		pDevice->bTransmitKey = true;
		pDevice->bEncryptionEnable = true;
		pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;

		// Do we want to just set the transmit key index?
		if (index < 4) {
			pDevice->byKeyIndex = index;
		} else if (!(wrq->flags & IW_ENCODE_MODE)) {
			rc = -EINVAL;
			return rc;
		}
	}
	// Read the flags
	if (wrq->flags & IW_ENCODE_DISABLED) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable WEP function\n");
		pMgmt->bShareKeyAlgorithm = false;
		pDevice->bEncryptionEnable = false;
		pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
		if (pDevice->flags & DEVICE_FLAGS_OPENED) {
			spin_lock_irq(&pDevice->lock);
			for (uu = 0; uu < MAX_KEY_TABLE; uu++)
				MACvDisableKeyEntry(pDevice, uu);
			spin_unlock_irq(&pDevice->lock);
		}
	}
	if (wrq->flags & IW_ENCODE_RESTRICTED) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable WEP & ShareKey System\n");
		pMgmt->bShareKeyAlgorithm = true;
	}
	if (wrq->flags & IW_ENCODE_OPEN) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable WEP & Open System\n");
		pMgmt->bShareKeyAlgorithm = false;
	}

	memset(pMgmt->abyDesireBSSID, 0xFF, 6);

	return rc;
}
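
Note: the three wrq->length comparisons correspond to the standard WEP key sizes. The constants are not shown in this snippet; the values below are assumed, matching the usual vt6656 driver definitions (secret-key bytes only, excluding the 24-bit IV):

/* Assumed values, not copied from this file. */
#define WLAN_WEP40_KEYLEN	5	/*  40-bit WEP */
#define WLAN_WEP104_KEYLEN	13	/* 104-bit WEP */
#define WLAN_WEP232_KEYLEN	29	/* 232-bit WEP */
#define WLAN_WEP_NKEYS		4	/* number of default-key slots */
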
Code example #3
File: iwctl.c  Project: mbgg/linux
/*
 * Wireless Handler: set operation mode
 */
int iwctl_siwmode(struct net_device *dev, struct iw_request_info *info,
		union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	__u32 *wmode = &wrqu->mode;
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	int rc = 0;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWMODE\n");

	if (pMgmt == NULL)
		return -EFAULT;

	if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP && pDevice->bEnableHostapd) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
			"Can't set operation mode, hostapd is running\n");
		return rc;
	}

	switch (*wmode) {
	case IW_MODE_ADHOC:
		if (pMgmt->eConfigMode != WMAC_CONFIG_IBSS_STA) {
			pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
			if (pDevice->flags & DEVICE_FLAGS_OPENED)
				pDevice->bCommit = true;
		}
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to ad-hoc \n");
		break;
	case IW_MODE_AUTO:
	case IW_MODE_INFRA:
		if (pMgmt->eConfigMode != WMAC_CONFIG_ESS_STA) {
			pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
			if (pDevice->flags & DEVICE_FLAGS_OPENED)
				pDevice->bCommit = true;
		}
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "set mode to infrastructure \n");
		break;
	case IW_MODE_MASTER:
		/* AP (master) mode is not supported */
		pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
		rc = -EOPNOTSUPP;
		break;

	case IW_MODE_REPEAT:
		pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
		rc = -EOPNOTSUPP;
		break;
	default:
		rc = -EINVAL;
	}

	if (pDevice->bCommit) {
		if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
			netif_stop_queue(pDevice->dev);
			spin_lock_irq(&pDevice->lock);
			bScheduleCommand((void *) pDevice,
				WLAN_CMD_RUN_AP, NULL);
			spin_unlock_irq(&pDevice->lock);
		} else {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
				"Commit the settings\n");

			spin_lock_irq(&pDevice->lock);

			if (pDevice->bLinkPass &&
				memcmp(pMgmt->abyCurrSSID,
					pMgmt->abyDesireSSID,
					WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN)) {
				bScheduleCommand((void *) pDevice,
					WLAN_CMD_DISASSOCIATE, NULL);
			} else {
				pDevice->bLinkPass = false;
				pMgmt->eCurrState = WMAC_STATE_IDLE;
				memset(pMgmt->abyCurrBSSID, 0, 6);
			}

			ControlvMaskByte(pDevice,
				MESSAGE_REQUEST_MACREG,	MAC_REG_PAPEDELAY,
					LEDSTS_STS, LEDSTS_SLOW);

			netif_stop_queue(pDevice->dev);

			pMgmt->eScanType = WMAC_SCAN_ACTIVE;

			if (!pDevice->bWPASuppWextEnabled)
				bScheduleCommand((void *) pDevice,
					 WLAN_CMD_BSSID_SCAN,
					 pMgmt->abyDesireSSID);

			bScheduleCommand((void *) pDevice,
				 WLAN_CMD_SSID,
				 NULL);

			spin_unlock_irq(&pDevice->lock);
		}
		pDevice->bCommit = false;
	}


	return rc;
}
Code example #4
static int btusb_resume(struct usb_interface *intf)
{
    struct btusb_data *data = usb_get_intfdata(intf);
    struct hci_dev *hdev = data->hdev;
    int err = 0;

    if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
        return 0;


    /*******************************/
    RTKBT_DBG("btusb_resume data->suspend_count=%d",data->suspend_count);

    if (!test_bit(HCI_RUNNING, &hdev->flags))
    {
        RTKBT_DBG("btusb_resume-----bt is off,download patch");
        download_patch(intf);
    }
    else
        RTKBT_DBG("btusb_resume,----bt is on");
    /*******************************/
    if (--data->suspend_count)
        return 0;

    if (test_bit(BTUSB_INTR_RUNNING, &data->flags)) {
        err = btusb_submit_intr_urb(hdev, GFP_NOIO);
        if (err < 0) {
            clear_bit(BTUSB_INTR_RUNNING, &data->flags);
            goto failed;
        }
    }

    if (test_bit(BTUSB_BULK_RUNNING, &data->flags)) {
        err = btusb_submit_bulk_urb(hdev, GFP_NOIO);
        if (err < 0) {
            clear_bit(BTUSB_BULK_RUNNING, &data->flags);
            goto failed;
        }

        btusb_submit_bulk_urb(hdev, GFP_NOIO);
    }

    if (test_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
        if (btusb_submit_isoc_urb(hdev, GFP_NOIO) < 0)
            clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
        else
            btusb_submit_isoc_urb(hdev, GFP_NOIO);
    }

    spin_lock_irq(&data->txlock);
    play_deferred(data);
    clear_bit(BTUSB_SUSPENDING, &data->flags);
    spin_unlock_irq(&data->txlock);
    schedule_work(&data->work);

    return 0;

failed:
    mdelay(URB_CANCELING_DELAY_MS);      // Added by Realtek
    usb_scuttle_anchored_urbs(&data->deferred);
    spin_lock_irq(&data->txlock);
    clear_bit(BTUSB_SUSPENDING, &data->flags);
    spin_unlock_irq(&data->txlock);

    return err;
}
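
Note: the `if (--data->suspend_count) return 0;` early-out only balances if the matching suspend handler increments the counter. A sketch of the presumed counterpart (not part of this snippet):

/* Presumed shape of btusb_suspend(): nested suspends only count;
 * the real teardown happens on the first one. */
static int btusb_suspend_sketch(struct usb_interface *intf, pm_message_t message)
{
    struct btusb_data *data = usb_get_intfdata(intf);

    if (data->suspend_count++)
        return 0;

    /* ... set BTUSB_SUSPENDING, stop traffic, kill anchored URBs ... */
    return 0;
}
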
Code example #5
File: ipath_cq.c  Project: 03199618/linux
/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @context: unused by the InfiniPath driver
 * @udata: unused by the InfiniPath driver
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cq *cq;
	struct ipath_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;

	if (entries < 1 || entries > ib_ipath_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = ipath_create_mmap_info(dev, sz, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		cq->ip = NULL;

	spin_lock(&dev->n_cqs_lock);
	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
	wc->head = 0;
	wc->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}
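
Note: the bail_ip/bail_wc/bail_cq labels unwind the allocations in reverse order, so each failure path frees exactly what was set up before it. The idiom in isolation (hypothetical names, a sketch rather than code from this file):

struct pair { void *a; void *b; };

static struct pair *pair_alloc_sketch(void)
{
	struct pair *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		goto fail;
	p->a = kmalloc(64, GFP_KERNEL);
	if (!p->a)
		goto free_p;
	p->b = vmalloc(PAGE_SIZE);	/* large/mappable, as with wc above */
	if (!p->b)
		goto free_a;
	return p;

free_a:
	kfree(p->a);
free_p:
	kfree(p);
fail:
	return NULL;
}
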
Code example #6
/*
 * Description:
 *      vt6655_hostap_ioctl main function supported for hostap deamon.
 *
 * Parameters:
 *  In:
 *      pDevice   -
 *      iw_point  -
 *  Out:
 *
 * Return Value:
 *
 */
int vt6655_hostap_ioctl(PSDevice pDevice, struct iw_point *p)
{
	struct viawget_hostapd_param *param;
	int ret = 0;
	int ap_ioctl = 0;

	if (p->length < sizeof(struct viawget_hostapd_param) ||
	    p->length > VIAWGET_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
		return -EINVAL;

	param = kmalloc((int)p->length, GFP_KERNEL);
	if (param == NULL)
		return -ENOMEM;

	if (copy_from_user(param, p->pointer, p->length)) {
		ret = -EFAULT;
		goto out;
	}

	switch (param->cmd) {
	case VIAWGET_HOSTAPD_SET_ENCRYPTION:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_ENCRYPTION \n");
		spin_lock_irq(&pDevice->lock);
		ret = hostap_set_encryption(pDevice, param, p->length);
		spin_unlock_irq(&pDevice->lock);
		break;
	case VIAWGET_HOSTAPD_GET_ENCRYPTION:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_GET_ENCRYPTION \n");
		spin_lock_irq(&pDevice->lock);
		ret = hostap_get_encryption(pDevice, param, p->length);
		spin_unlock_irq(&pDevice->lock);
		break;
	case VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR \n");
		ret = -EOPNOTSUPP;
		goto out;
	case VIAWGET_HOSTAPD_FLUSH:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_FLUSH \n");
		spin_lock_irq(&pDevice->lock);
		hostap_flush_sta(pDevice);
		spin_unlock_irq(&pDevice->lock);
		break;
	case VIAWGET_HOSTAPD_ADD_STA:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_ADD_STA \n");
		spin_lock_irq(&pDevice->lock);
		ret = hostap_add_sta(pDevice, param);
		spin_unlock_irq(&pDevice->lock);
		break;
	case VIAWGET_HOSTAPD_REMOVE_STA:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_REMOVE_STA \n");
		spin_lock_irq(&pDevice->lock);
		ret = hostap_remove_sta(pDevice, param);
		spin_unlock_irq(&pDevice->lock);
		break;
	case VIAWGET_HOSTAPD_GET_INFO_STA:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_GET_INFO_STA \n");
		ret = hostap_get_info_sta(pDevice, param);
		ap_ioctl = 1;
		break;
/*
	case VIAWGET_HOSTAPD_RESET_TXEXC_STA:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_RESET_TXEXC_STA \n");
		ret = hostap_reset_txexc_sta(pDevice, param);
		break;
*/
	case VIAWGET_HOSTAPD_SET_FLAGS_STA:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_FLAGS_STA \n");
		ret = hostap_set_flags_sta(pDevice, param);
		break;
	case VIAWGET_HOSTAPD_MLME:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_MLME \n");
		ret = -EOPNOTSUPP;
		goto out;
	case VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT \n");
		ret = hostap_set_generic_element(pDevice, param);
		break;
	case VIAWGET_HOSTAPD_SCAN_REQ:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SCAN_REQ \n");
		ret = -EOPNOTSUPP;
		goto out;
	case VIAWGET_HOSTAPD_STA_CLEAR_STATS:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_STA_CLEAR_STATS \n");
		ret = -EOPNOTSUPP;
		goto out;
	default:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "vt6655_hostap_ioctl: unknown cmd=%d\n",
			(int)param->cmd);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if ((ret == 0) && ap_ioctl) {
		if (copy_to_user(p->pointer, param, p->length)) {
			ret = -EFAULT;
		}
	}

out:
	kfree(param);
	return ret;
}
Code example #7
File: u_serial.c  Project: 33d/linux-2.6.21-hh20
/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(unsigned long _port)
{
	struct gs_port		*port = (void *)_port;
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; without low_latency set this is handled by
	 * a workqueue, so we won't get callbacks and can hold port_lock
	 */
	if (tty && do_push)
		tty_flip_buffer_push(tty);

	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the tasklet
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
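
Note: gs_rx_push() is a tasklet body (hence the unsigned long argument and the tasklet_schedule(&port->push) it uses to re-arm itself); spin_lock_irq() is fine here because softirqs run with hardware interrupts enabled. The wiring presumably looks like this (the field name "push" is taken from the call above, the rest is assumed):

static void gs_port_init_push_sketch(struct gs_port *port)
{
	/* bind the handler; the port pointer comes back as _port */
	tasklet_init(&port->push, gs_rx_push, (unsigned long)port);
}

/* an RX completion handler would then re-arm it with:
 *	tasklet_schedule(&port->push);
 */
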
Code example #8
static int s5p_ehci_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_ehci_hcd *s5p_ehci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ehci->hcd;
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);

	clk_enable(s5p_ehci->clk);

	s5p_ehci_phy_init(pdev);

	/* if EHCI was off, hcd was removed */
	if (!s5p_ehci->power_on) {
		dev_info(dev, "Nothing to do for the device (power off)\n");
		return 0;
	}

	if (time_before(jiffies, ehci->next_statechange))
		usleep_range(10000, 11000);

	/* Mark hardware accessible again as we are out of D3 state by now */
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
		int	mask = INTR_MASK;

		if (!hcd->self.root_hub->do_remote_wakeup)
			mask &= ~STS_PCD;
		ehci_writel(ehci, mask, &ehci->regs->intr_enable);
		ehci_readl(ehci, &ehci->regs->intr_enable);
		return 0;
	}

	ehci_dbg(ehci, "lost power, restarting\n");
	usb_root_hub_lost_power(hcd->self.root_hub);

	(void) ehci_halt(ehci);
	(void) ehci_reset(ehci);

	/* emptying the schedule aborts any urbs */
	spin_lock_irq(&ehci->lock);
	if (ehci->reclaim)
		end_unlink_async(ehci);
	ehci_work(ehci);
	spin_unlock_irq(&ehci->lock);

	ehci_writel(ehci, ehci->command, &ehci->regs->command);
	ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
	ehci_readl(ehci, &ehci->regs->command);	/* unblock posted writes */

	/* here we "know" root ports should always stay powered */
	ehci_port_power(ehci, 1);

	hcd->state = HC_STATE_SUSPENDED;

	/* Update runtime PM status and clear runtime_error */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	/* Prevent device from runtime suspend during resume time */
	pm_runtime_get_sync(dev);

#ifdef CONFIG_MDM_HSIC_PM
	set_host_stat(hsic_pm_dev, POWER_ON);
	wait_dev_pwr_stat(hsic_pm_dev, POWER_ON);
#endif
#if defined(CONFIG_LINK_DEVICE_HSIC) || defined(CONFIG_LINK_DEVICE_USB) \
	|| defined(CONFIG_MDM_HSIC_PM)
	pm_runtime_mark_last_busy(&hcd->self.root_hub->dev);
#endif
	return 0;
}
Code example #9
File: smp.c  Project: mrtos/Logitech-Revue
void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}
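
Note: this wrapper implies a matching unlock, presumably defined right next to it:

/* Presumed counterpart from the same smp.c; spin_lock_irq() is valid
 * here because the IPI call path runs with interrupts enabled. */
void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}
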
Code example #10
File: signal.c  Project: TitaniumBoy/lin
/* Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
			 unsigned long orig_i0, int restart_syscall)
{
	unsigned long signr;
	siginfo_t info;
	struct k_sigaction *ka;
	
	if (!oldset)
		oldset = &current->blocked;

#ifdef CONFIG_SPARC32_COMPAT
	if (current->thread.flags & SPARC_FLAG_32BIT) {
		extern asmlinkage int do_signal32(sigset_t *, struct pt_regs *,
						  unsigned long, int);
		return do_signal32(oldset, regs, orig_i0, restart_syscall);
	}
#endif	
	for (;;) {
		spin_lock_irq(&current->sigmask_lock);
		signr = dequeue_signal(&current->blocked, &info);
		spin_unlock_irq(&current->sigmask_lock);
		
		if (!signr) break;

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			current->exit_code = signr;
			current->state = TASK_STOPPED;
			notify_parent(current, SIGCHLD);
			schedule();
			if (!(signr = current->exit_code))
				continue;
			current->exit_code = 0;
			if (signr == SIGSTOP)
				continue;

			/* Update the siginfo structure.  Is this good?  */
			if (signr != info.si_signo) {
				info.si_signo = signr;
				info.si_errno = 0;
				info.si_code = SI_USER;
				info.si_pid = current->p_pptr->pid;
				info.si_uid = current->p_pptr->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				send_sig_info(signr, &info, current);
				continue;
			}
		}
		
		ka = &current->sig->action[signr-1];
		
		if(ka->sa.sa_handler == SIG_IGN) {
			if(signr != SIGCHLD)
				continue;

                        /* sys_wait4() grabs the master kernel lock, so
                         * we need not do so, that sucker should be
                         * threaded and would not be that difficult to
                         * do anyways.
                         */
                        while(sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
                                ;
			continue;
		}
		if(ka->sa.sa_handler == SIG_DFL) {
			unsigned long exit_code = signr;
			
			if(current->pid == 1)
				continue;
			switch(signr) {
			case SIGCONT: case SIGCHLD: case SIGWINCH:
				continue;

			case SIGTSTP: case SIGTTIN: case SIGTTOU:
				if (is_orphaned_pgrp(current->pgrp))
					continue;
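				/* fall through */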

			case SIGSTOP:
				if (current->ptrace & PT_PTRACED)
					continue;
				current->state = TASK_STOPPED;
				current->exit_code = signr;
				if(!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags &
				     SA_NOCLDSTOP))
					notify_parent(current, SIGCHLD);
				schedule();
				continue;

			case SIGQUIT: case SIGILL: case SIGTRAP:
			case SIGABRT: case SIGFPE: case SIGSEGV:
			case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
				if (do_coredump(signr, regs))
					exit_code |= 0x80;
#ifdef DEBUG_SIGNALS
				/* Very useful to debug the dynamic linker */
				printk ("Sig %d going...\n", (int)signr);
				show_regs (regs);
#ifdef DEBUG_SIGNALS_TRACE
				{
					struct reg_window *rw = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
					unsigned long ins[8];
                                                
					while(rw &&
					      !(((unsigned long) rw) & 0x3)) {
					        copy_from_user(ins, &rw->ins[0], sizeof(ins));
						printk("Caller[%016lx](%016lx,%016lx,%016lx,%016lx,%016lx,%016lx)\n", ins[7], ins[0], ins[1], ins[2], ins[3], ins[4], ins[5]);
						rw = (struct reg_window *)(unsigned long)(ins[6] + STACK_BIAS);
					}
				}
#endif			
#ifdef DEBUG_SIGNALS_MAPS	
				printk("Maps:\n");
				read_maps();
#endif
#endif
				/* fall through */
			default:
				sigaddset(&current->pending.signal, signr);
				recalc_sigpending(current);
				current->flags |= PF_SIGNALED;
				do_exit(exit_code);
				/* NOT REACHED */
			}
		}
		if(restart_syscall)
			syscall_restart(orig_i0, regs, &ka->sa);
		handle_signal(signr, ka, &info, oldset, regs);
		return 1;
	}
	if(restart_syscall &&
	   (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	    regs->u_regs[UREG_I0] == ERESTARTSYS ||
	    regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
	return 0;
}
Code example #11
File: signal.c  Project: TitaniumBoy/lin
/* {set, get}context() needed for 64-bit SparcLinux userland. */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
	struct ucontext *ucp = (struct ucontext *) regs->u_regs[UREG_I0];
	struct thread_struct *tp = &current->thread;
	mc_gregset_t *grp;
	unsigned long pc, npc, tstate;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	flush_user_windows();
	if(tp->w_saved						||
	   (((unsigned long)ucp) & (sizeof(unsigned long)-1))	||
	   (!__access_ok((unsigned long)ucp, sizeof(*ucp))))
		goto do_sigsegv;
	grp  = &ucp->uc_mcontext.mc_gregs;
	err  = __get_user(pc, &((*grp)[MC_PC]));
	err |= __get_user(npc, &((*grp)[MC_NPC]));
	if(err || ((pc | npc) & 3))
		goto do_sigsegv;
	if(regs->u_regs[UREG_I1]) {
		sigset_t set;

		if (_NSIG_WORDS == 1) {
			if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
				goto do_sigsegv;
		} else {
			if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
				goto do_sigsegv;
		}
		sigdelsetmask(&set, ~_BLOCKABLE);
		spin_lock_irq(&current->sigmask_lock);
		current->blocked = set;
		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
	}
	if ((tp->flags & SPARC_FLAG_32BIT) != 0) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;
	err |= __get_user(regs->y, &((*grp)[MC_Y]));
	err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
	regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ICC | TSTATE_XCC));
	err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
	err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
	err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
	err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
	err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
	err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
	err |= __get_user(regs->u_regs[UREG_G7], (&(*grp)[MC_G7]));
	err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
	err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
	err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
	err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
	err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
	err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
	err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
	err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));

	err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
	err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
	err |= __put_user(fp,
	      (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __put_user(i7,
	      (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));

	err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
	if(fenab) {
		unsigned long *fpregs = (unsigned long *)(((char *)current) + AOFF_task_fpregs);
		unsigned long fprs;
		
		fprs_write(0);
		err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
		if (fprs & FPRS_DL)
			err |= copy_from_user(fpregs,
					      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
					      (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_from_user(fpregs+16,
			 ((unsigned long *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
			 (sizeof(unsigned int) * 32));
		err |= __get_user(current->thread.xfsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
		err |= __get_user(current->thread.gsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
		regs->tstate &= ~TSTATE_PEF;
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	do_exit(SIGSEGV);
}
Code example #12
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
{
	struct k_sigaction *ka;
	siginfo_t info;
	int single_stepping;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return 0;

	if (!oldset)
		oldset = &current->blocked;

	single_stepping = ptrace_cancel_bpt(current);

	for (;;) {
		unsigned long signr;

		spin_lock_irq (&current->sigmask_lock);
		signr = dequeue_signal(&current->blocked, &info);
		spin_unlock_irq (&current->sigmask_lock);

		if (!signr)
			break;

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			/* Let the debugger run.  */
			current->exit_code = signr;
			current->state = TASK_STOPPED;
			notify_parent(current, SIGCHLD);
			schedule();
			single_stepping |= ptrace_cancel_bpt(current);

			/* We're back.  Did the debugger cancel the sig?  */
			if (!(signr = current->exit_code))
				continue;
			current->exit_code = 0;

			/* The debugger continued.  Ignore SIGSTOP.  */
			if (signr == SIGSTOP)
				continue;

			/* Update the siginfo structure.  Is this good? */
			if (signr != info.si_signo) {
				info.si_signo = signr;
				info.si_errno = 0;
				info.si_code = SI_USER;
				info.si_pid = current->p_pptr->pid;
				info.si_uid = current->p_pptr->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				send_sig_info(signr, &info, current);
				continue;
			}
		}

		ka = &current->sig->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) {
			if (signr != SIGCHLD)
				continue;
			/* Check for SIGCHLD: it's special.  */
			while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
				/* nothing */;
			continue;
		}

		if (ka->sa.sa_handler == SIG_DFL) {
			int exit_code = signr;

			/* Init gets no signals it doesn't want.  */
			if (current->pid == 1)
				continue;

			switch (signr) {
			case SIGCONT: case SIGCHLD: case SIGWINCH: case SIGURG:
				continue;

			case SIGTSTP: case SIGTTIN: case SIGTTOU:
				if (is_orphaned_pgrp(current->pgrp))
					continue;
				/* FALLTHRU */

			case SIGSTOP: {
				struct signal_struct *sig;
				current->state = TASK_STOPPED;
				current->exit_code = signr;
				sig = current->p_pptr->sig;
				if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
					notify_parent(current, SIGCHLD);
				schedule();
				single_stepping |= ptrace_cancel_bpt(current);
				continue;
			}

			case SIGQUIT: case SIGILL: case SIGTRAP:
			case SIGABRT: case SIGFPE: case SIGSEGV:
			case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
				if (do_coredump(signr, regs))
					exit_code |= 0x80;
				/* FALLTHRU */

			default:
				sigaddset(&current->pending.signal, signr);
				recalc_sigpending(current);
				current->flags |= PF_SIGNALED;
				do_exit(exit_code);
				/* NOTREACHED */
			}
		}

		/* Are we from a system call? */
		if (syscall) {
			switch (regs->ARM_r0) {
			case -ERESTARTNOHAND:
				regs->ARM_r0 = -EINTR;
				break;

			case -ERESTARTSYS:
				if (!(ka->sa.sa_flags & SA_RESTART)) {
					regs->ARM_r0 = -EINTR;
					break;
				}
				/* fallthrough */
			case -ERESTARTNOINTR:
				regs->ARM_r0 = regs->ARM_ORIG_r0;
				regs->ARM_pc -= 4;
			}
		}
		/* Whee!  Actually deliver the signal.  */
		handle_signal(signr, ka, &info, oldset, regs);
		if (single_stepping)
		    	ptrace_set_bpt(current);
		return 1;
	}

	if (syscall &&
	    (regs->ARM_r0 == -ERESTARTNOHAND ||
	     regs->ARM_r0 == -ERESTARTSYS ||
	     regs->ARM_r0 == -ERESTARTNOINTR)) {
		regs->ARM_r0 = regs->ARM_ORIG_r0;
		regs->ARM_pc -= 4;
	}
	if (single_stepping)
		ptrace_set_bpt(current);
	return 0;
}
Code example #13
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, int len,
		  int noblock, int flags, int *addr_len)
{
  	struct sk_buff *skb;
  	int copied, err;

  	if (addr_len)
		*addr_len = sizeof(struct sockaddr_in6);
  
	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

 	copied = skb->len - sizeof(struct udphdr);
  	if (copied > len) {
  		copied = len;
  		msg->msg_flags |= MSG_TRUNC;
  	}

	if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
					      copied);
	} else if (msg->msg_flags&MSG_TRUNC) {
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
			goto csum_copy_err;
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
					      copied);
	} else {
		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		struct sockaddr_in6 *sin6;
	  
		sin6 = (struct sockaddr_in6 *) msg->msg_name;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = skb->h.uh->source;
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = 0;

		if (skb->protocol == htons(ETH_P_IP)) {
			ipv6_addr_set(&sin6->sin6_addr, 0, 0,
				      htonl(0xffff), skb->nh.iph->saddr);
			if (sk->protinfo.af_inet.cmsg_flags)
				ip_cmsg_recv(msg, skb);
		} else {
			memcpy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr,
			       sizeof(struct in6_addr));

			if (sk->net_pinfo.af_inet6.rxopt.all)
				datagram_recv_ctl(sk, msg, skb);
			if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
				struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb;
				sin6->sin6_scope_id = opt->iif;
			}
		}
  	}
	err = copied;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	/* Clear queue. */
	if (flags&MSG_PEEK) {
		int clear = 0;
		spin_lock_irq(&sk->receive_queue.lock);
		if (skb == skb_peek(&sk->receive_queue)) {
			__skb_unlink(skb, &sk->receive_queue);
			clear = 1;
		}
		spin_unlock_irq(&sk->receive_queue.lock);
		if (clear)
			kfree_skb(skb);
	}

	/* Error for blocking case is chosen to masquerade
	   as some normal condition.
	 */
	err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
	UDP6_INC_STATS_USER(UdpInErrors);
	goto out_free;
}
Code example #14
static int target_fabric_mappedlun_link(
	struct config_item *lun_acl_ci,
	struct config_item *lun_ci)
{
	struct se_dev_entry *deve;
	struct se_lun *lun = container_of(to_config_group(lun_ci),
			struct se_lun, lun_group);
	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
			struct se_lun_acl, se_lun_group);
	struct se_portal_group *se_tpg;
	struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
	int ret = 0, lun_access;
	/*
	 * Ensure that the source port exists
	 */
	if (!lun->lun_sep || !lun->lun_sep->sep_tpg) {
		pr_err("Source se_lun->lun_sep or lun->lun_sep->sep"
				"_tpg does not exist\n");
		return -EINVAL;
	}
	se_tpg = lun->lun_sep->sep_tpg;

	nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
	tpg_ci = &nacl_ci->ci_group->cg_item;
	wwn_ci = &tpg_ci->ci_group->cg_item;
	tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item;
	wwn_ci_s = &tpg_ci_s->ci_group->cg_item;
	/*
	 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
	 */
	if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
		pr_err("Illegal Initiator ACL SymLink outside of %s\n",
			config_item_name(wwn_ci));
		return -EINVAL;
	}
	if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
		pr_err("Illegal Initiator ACL Symlink outside of %s"
			" TPGT: %s\n", config_item_name(wwn_ci),
			config_item_name(tpg_ci));
		return -EINVAL;
	}
	/*
	 * If this struct se_node_acl was dynamically generated with
	 * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
	 * which be will write protected (READ-ONLY) when
	 * tpg_1/attrib/demo_mode_write_protect=1
	 */
	spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
	deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun];
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
		lun_access = deve->lun_flags;
	else
		lun_access =
			(se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
				se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
					   TRANSPORT_LUNFLAGS_READ_WRITE;
	spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
	/*
	 * Determine the actual mapped LUN value user wants..
	 *
	 * This value is what the SCSI Initiator actually sees the
	 * iscsi/$IQN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
	 */
	ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
			lun->unpacked_lun, lun_access);

	return (ret < 0) ? -EINVAL : 0;
}
Code example #15
File: sys.c  Project: FatSunHYS/OSCourseDesign
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	unsigned long it_prof_secs;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
		unsigned long rlim_cur = new_rlim.rlim_cur;
		cputime_t cputime;

		if (rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry.  But we use the zero value to mean "it was
			 * never set".  So let's cheat and make it one second
			 * instead
			 */
			rlim_cur = 1;
		}
		cputime = secs_to_cputime(rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}
out:
	return 0;
}
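
Note: for reference, the userspace call this syscall services; per the comment above, an RLIMIT_CPU value the kernel dislikes may not be reported as an error. A minimal user-side sketch:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 10, .rlim_max = 20 };

	/* SIGXCPU after 10s of CPU time, hard limit at 20s */
	if (setrlimit(RLIMIT_CPU, &rl) != 0)
		perror("setrlimit");
	return 0;
}
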
Code example #16
File: btnode.c  Project: Ale1ster/kerneldir
/**
 * nilfs_btnode_prepare_change_key
 *  prepare to move contents of the block for old key to one of new key.
 *  the old buffer will not be removed, but might be reused for new buffer.
 *  it might return -ENOMEM because of memory allocation errors,
 *  and might return -EIO because of disk read errors.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh, *nbh;
	struct inode *inode = NILFS_BTNC_I(btnc);
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	int err;

	if (oldkey == newkey)
		return 0;

	obh = ctxt->bh;
	ctxt->newbh = NULL;

	if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
		lock_page(obh->b_page);
		/*
		 * We cannot call radix_tree_preload for the kernels older
		 * than 2.6.23, because it is not exported for modules.
		 */
retry:
		err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
		if (err)
			goto failed_unlock;
		/* BUG_ON(oldkey != obh->b_page->index); */
		if (unlikely(oldkey != obh->b_page->index))
			NILFS_PAGE_BUG(obh->b_page,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);

		spin_lock_irq(&btnc->tree_lock);
		err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
		spin_unlock_irq(&btnc->tree_lock);
		/*
		 * Note: page->index will not change to newkey until
		 * nilfs_btnode_commit_change_key() will be called.
		 * To protect the page in intermediate state, the page lock
		 * is held.
		 */
		radix_tree_preload_end();
		if (!err)
			return 0;
		else if (err != -EEXIST)
			goto failed_unlock;

		err = invalidate_inode_pages2_range(btnc, newkey, newkey);
		if (!err)
			goto retry;
		/* fallback to copy mode */
		unlock_page(obh->b_page);
	}

	nbh = nilfs_btnode_create_block(btnc, newkey);
	if (!nbh)
		return -ENOMEM;

	BUG_ON(nbh == obh);
	ctxt->newbh = nbh;
	return 0;

 failed_unlock:
	unlock_page(obh->b_page);
	return err;
}
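
Note: the preload/insert sequence above is the standard radix-tree idiom: preallocate nodes while sleeping is still allowed, then do the insert under the spinlock where allocation would not be. In isolation, as a sketch using the same address_space fields as the example:

static int btnc_insert_sketch(struct address_space *btnc,
			      pgoff_t index, struct page *page)
{
	int err;

	/* may sleep; disables preemption on success */
	err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (err)
		return err;

	spin_lock_irq(&btnc->tree_lock);
	err = radix_tree_insert(&btnc->page_tree, index, page);
	spin_unlock_irq(&btnc->tree_lock);

	radix_tree_preload_end();	/* re-enables preemption */
	return err;			/* -EEXIST if the slot was taken */
}
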
Code example #17
File: ldusb.c  Project: Tigrouzen/k1099
/**
 *	ld_usb_read
 */
static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
			   loff_t *ppos)
{
	struct ld_usb *dev;
	size_t *actual_buffer;
	size_t bytes_to_read;
	int retval = 0;
	int rv;

	dev = file->private_data;

	/* verify that we actually have some data to read */
	if (count == 0)
		goto exit;

	/* lock this object */
	if (down_interruptible(&dev->sem)) {
		retval = -ERESTARTSYS;
		goto exit;
	}

	/* verify that the device wasn't unplugged */
	if (dev->intf == NULL) {
		retval = -ENODEV;
		err("No device or device unplugged %d\n", retval);
		goto unlock_exit;
	}

	/* wait for data */
	spin_lock_irq(&dev->rbsl);
	if (dev->ring_head == dev->ring_tail) {
		dev->interrupt_in_done = 0;
		spin_unlock_irq(&dev->rbsl);
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto unlock_exit;
		}
		retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
		if (retval < 0)
			goto unlock_exit;
	} else {
		spin_unlock_irq(&dev->rbsl);
	}

	/* actual_buffer contains actual_length + interrupt_in_buffer */
	actual_buffer = (size_t*)(dev->ring_buffer + dev->ring_tail*(sizeof(size_t)+dev->interrupt_in_endpoint_size));
	bytes_to_read = min(count, *actual_buffer);
	if (bytes_to_read < *actual_buffer)
		dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
			 *actual_buffer-bytes_to_read);

	/* copy one interrupt_in_buffer from ring_buffer into userspace */
	if (copy_to_user(buffer, actual_buffer+1, bytes_to_read)) {
		retval = -EFAULT;
		goto unlock_exit;
	}
	dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size;

	retval = bytes_to_read;

	spin_lock_irq(&dev->rbsl);
	if (dev->buffer_overflow) {
		dev->buffer_overflow = 0;
		spin_unlock_irq(&dev->rbsl);
		rv = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL);
		if (rv < 0)
			dev->buffer_overflow = 1;
	} else {
		spin_unlock_irq(&dev->rbsl);
	}

unlock_exit:
	/* unlock the device */
	up(&dev->sem);

exit:
	return retval;
}
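
Note: the read side checks the ring indices under dev->rbsl, drops the lock, and sleeps on interrupt_in_done. The interrupt-in URB completion handler presumably does the producer half, roughly like this (field names follow the example; the body is assumed):

static void ld_usb_producer_sketch(struct ld_usb *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->rbsl, flags);
	dev->ring_head = (dev->ring_head + 1) % ring_buffer_size;
	dev->interrupt_in_done = 1;
	spin_unlock_irqrestore(&dev->rbsl, flags);

	wake_up_interruptible(&dev->read_wait);
}
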
Code example #18
static int dma_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct runtime_data *prtd = runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	unsigned long totbytes = params_buffer_bytes(params);
	struct s3c_dma_params *dma =
		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
	struct samsung_dma_req req;
	struct samsung_dma_config config;

	pr_debug("Entered %s\n", __func__);

	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!dma)
		return 0;

	/* this may get called several times by oss emulation
	 * with different params -HW */
	if (prtd->params == NULL) {
		/* prepare DMA */
		prtd->params = dma;

		pr_debug("params %p, client %p, channel %d\n", prtd->params,
			prtd->params->client, prtd->params->channel);

		prtd->params->ops = samsung_dma_get_ops();

		req.cap = (samsung_dma_has_circular() ?
			DMA_CYCLIC : DMA_SLAVE);
		req.client = prtd->params->client;
		config.direction =
			(substream->stream == SNDRV_PCM_STREAM_PLAYBACK
			? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
		config.width = prtd->params->dma_size;
		config.maxburst = 1;
		config.fifo = prtd->params->dma_addr;
		prtd->params->ch = prtd->params->ops->request(
				prtd->params->channel, &req);
		prtd->params->ops->config(prtd->params->ch, &config);
	}

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	runtime->dma_bytes = totbytes;

	spin_lock_irq(&prtd->lock);
	prtd->dma_loaded = 0;
	prtd->dma_period = params_period_bytes(params);
	prtd->dma_start = runtime->dma_addr;
	prtd->dma_pos = prtd->dma_start;
	prtd->dma_end = prtd->dma_start + totbytes;

	if (runtime->dma_addr > EXYNOS_PA_AUDSS)
		prtd->dram_used = true;
	else
		prtd->dram_used = false;
	spin_unlock_irq(&prtd->lock);

	pr_debug("ADMA:%s:DmaAddr=@%x Total=%d PrdSz=%d #Prds=%d dma_area=0x%x\n",
		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? "P" : "C",
		prtd->dma_start, runtime->dma_bytes,
		params_period_bytes(params), params_periods(params),
		(unsigned int)runtime->dma_area);

	return 0;
}
Code example #19
File: cdc-acm.c  Project: LITMUS-RT/litmus-rt-odroidx
static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
{
	struct acm *acm = container_of(port, struct acm, port);
	int retval = -ENODEV;

	dev_dbg(&acm->control->dev, "%s\n", __func__);

	mutex_lock(&acm->mutex);
	if (acm->disconnected)
		goto disconnected;

	retval = usb_autopm_get_interface(acm->control);
	if (retval)
		goto error_get_interface;

	/*
	 * FIXME: Why do we need this? Allocating 64K of physically contiguous
	 * memory is really nasty...
	 */
	set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
	acm->control->needs_remote_wakeup = 1;

	acm->ctrlurb->dev = acm->dev;
	if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
		dev_err(&acm->control->dev,
			"%s - usb_submit_urb(ctrl irq) failed\n", __func__);
		goto error_submit_urb;
	}

	acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS;
	if (acm_set_control(acm, acm->ctrlout) < 0 &&
	    (acm->ctrl_caps & USB_CDC_CAP_LINE))
		goto error_set_control;

	usb_autopm_put_interface(acm->control);

	/*
	 * Unthrottle device in case the TTY was closed while throttled.
	 */
	spin_lock_irq(&acm->read_lock);
	acm->throttled = 0;
	acm->throttle_req = 0;
	spin_unlock_irq(&acm->read_lock);

	if (acm_submit_read_urbs(acm, GFP_KERNEL))
		goto error_submit_read_urbs;

	mutex_unlock(&acm->mutex);

	return 0;

error_submit_read_urbs:
	acm->ctrlout = 0;
	acm_set_control(acm, acm->ctrlout);
error_set_control:
	usb_kill_urb(acm->ctrlurb);
error_submit_urb:
	usb_autopm_put_interface(acm->control);
error_get_interface:
disconnected:
	mutex_unlock(&acm->mutex);
	return retval;
}
Code example #20
File: u_serial.c  Project: 33d/linux-2.6.21-hh20
/*
 * gs_open sets up the link between a gs_port and its associated TTY.
 * That link is broken *only* by TTY close(), and all driver methods
 * know that.
 */
static int gs_open(struct tty_struct *tty, struct file *file)
{
	int		port_num = tty->index;
	struct gs_port	*port;
	int		status;

	if (port_num < 0 || port_num >= n_ports)
		return -ENXIO;

	do {
		mutex_lock(&ports[port_num].lock);
		port = ports[port_num].port;
		if (!port)
			status = -ENODEV;
		else {
			spin_lock_irq(&port->port_lock);

			/* already open?  Great. */
			if (port->open_count) {
				status = 0;
				port->open_count++;

			/* currently opening/closing? wait ... */
			} else if (port->openclose) {
				status = -EBUSY;

			/* ... else we do the work */
			} else {
				status = -EAGAIN;
				port->openclose = true;
			}
			spin_unlock_irq(&port->port_lock);
		}
		mutex_unlock(&ports[port_num].lock);

		switch (status) {
		default:
			/* fully handled */
			return status;
		case -EAGAIN:
			/* must do the work */
			break;
		case -EBUSY:
			/* wait for EAGAIN task to finish */
			msleep(1);
			/* REVISIT could have a waitchannel here, if
			 * concurrent open performance is important
			 */
			break;
		}
	} while (status != -EAGAIN);

	/* Do the "real open" */
	spin_lock_irq(&port->port_lock);

	/* allocate circular buffer on first open */
	if (port->port_write_buf.buf_buf == NULL) {

		spin_unlock_irq(&port->port_lock);
		status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
		spin_lock_irq(&port->port_lock);

		if (status) {
			pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
				port->port_num, tty, file);
			port->openclose = false;
			goto exit_unlock_port;
		}
	}

	/* REVISIT if REMOVED (ports[].port NULL), abort the open
	 * to let rmmod work faster (but this way isn't wrong).
	 */

	/* REVISIT maybe wait for "carrier detect" */

	tty->driver_data = port;
	port->port_tty = tty;

	port->open_count = 1;
	port->openclose = false;

	/* if connected, start the I/O stream */
	if (port->port_usb) {
		struct gserial	*gser = port->port_usb;

		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
		gs_start_io(port);

		if (gser->connect)
			gser->connect(gser);
	}

	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);

	status = 0;

exit_unlock_port:
	spin_unlock_irq(&port->port_lock);
	return status;
}
Code example #21
static int stheno_do_request(void* arg)
{
    while(1)
    {
        wait_event_interruptible(stheno_process_q, stheno_processing == 1);

        stheno_processing = 0;

        do
        {
            struct request *req;
            int ret;

            spin_lock_irq( stheno_queue->queue_lock );
            req = blk_fetch_request( stheno_queue );
            spin_unlock_irq( stheno_queue->queue_lock );

next_segment:
            if (req == NULL) break;

            /* ignore not fs cmd */
            if( ! blk_fs_request( req ) )
            {
                printk( KERN_ERR "skip no fs request\n" );
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur(req, -EIO);
                spin_unlock_irq( stheno_queue->queue_lock );
                if (ret) goto next_segment;
                continue;
            }

            /* ignore nr_sectors == 0 request */
            if( blk_rq_sectors(req) == 0 || blk_rq_cur_sectors(req) == 0)
            {
                printk( KERN_ERR "skip nr_sectors == (%d) current_nr_sectors == (%d) request\n", (int)blk_rq_sectors(req), (int)blk_rq_cur_sectors(req) );
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur(req, -EIO);
                spin_unlock_irq( stheno_queue->queue_lock );
                if (ret) goto next_segment;
                continue;
            }

            stheno_printk( KERN_NOTICE "stheno_request: REQUEST START! %s sect(%d) curr_count(%d) count(%d)\n", (rq_data_dir( req ) == WRITE) ? "write" : "read",  (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));

            if( seg_info.dirty )
            {
                stheno_printk( KERN_NOTICE "stheno_request:                %s dirty(%d) seg_start_sect(%d) seg_pos(%d) seg_nr_count(%d)\n", (rq_data_dir( req ) == WRITE) ? "write" : "read",  (int)seg_info.dirty,  (int)seg_info.start_sector, (int)seg_info.current_pos, (int)seg_info.nr_sectors );
            }

            if( seg_info.dirty && rq_data_dir( req ) != seg_info.cmd )
            {
                if( seg_info.cmd == WRITE )
                {
                    stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) start\n", (int)seg_info.start_sector, (int)seg_info.current_pos);
                    ret = euryale_write_process(seg_info.start_sector, seg_info.current_pos, seg_info.buffer);
                    stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) ret(%d) end\n", (int)seg_info.start_sector, (int)seg_info.current_pos, ret);
                    if( ret < 0 )
                    {
                        printk( KERN_ERR "stheno_request: segment write sect(%d) count(%d) ERROR\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                        
                        spin_lock_irq( stheno_queue->queue_lock );
                        ret = __blk_end_request_cur(req, -EIO);
                        spin_unlock_irq( stheno_queue->queue_lock );
                        if (ret) goto next_segment;
                        continue;
                    }
                }
                seg_info.dirty = 0;
            }
    
            if( rq_data_dir( req ) == WRITE )
            {
                if( !seg_info.dirty
                    && blk_rq_cur_sectors(req) < blk_rq_sectors(req) )
                {
                    stheno_printk( KERN_NOTICE "stheno_request: new_segment(%d) cur_count(%d) count(%d)\n", (int)seg_info.dirty, (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));

                    seg_info.cmd = rq_data_dir( req ); 
                    seg_info.start_sector = blk_rq_pos(req);
                    seg_info.nr_sectors = blk_rq_sectors(req);
                    memcpy( seg_info.buffer, req->buffer, blk_rq_cur_sectors(req) * SECT_SIZE);
                    seg_info.current_pos = blk_rq_cur_sectors(req);
                    seg_info.dirty = 1;

                    spin_lock_irq( stheno_queue->queue_lock );
                    ret = __blk_end_request_cur(req, 0);
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if (ret) goto next_segment;
                    continue;
                }
                else if( seg_info.dirty
                    && seg_info.start_sector + seg_info.current_pos == blk_rq_pos(req)
                    && seg_info.nr_sectors - seg_info.current_pos == blk_rq_sectors(req)
                    && seg_info.nr_sectors - seg_info.current_pos >= blk_rq_cur_sectors(req) )
                {
                    stheno_printk( KERN_NOTICE "stheno_request: add_segment(%d) cur_count(%d) count(%d)\n", (int)seg_info.dirty, (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));

                    memcpy( seg_info.buffer + (seg_info.current_pos * SECT_SIZE), req->buffer, blk_rq_cur_sectors(req) * SECT_SIZE);
                    seg_info.current_pos += blk_rq_cur_sectors(req);
                    if( seg_info.current_pos == seg_info.nr_sectors )
                    {
                        stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) start\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                        ret = euryale_write_process(seg_info.start_sector, seg_info.current_pos, seg_info.buffer);
                        stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) end\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                        seg_info.dirty = 0;
                        if( ret < 0 )
                        {
                            printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) ERROR\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                            spin_lock_irq( stheno_queue->queue_lock );
							ret=__blk_end_request_cur(req, -EIO);
							spin_unlock_irq( stheno_queue->queue_lock );
							if(ret==true) goto next_segment;
							continue;                                   
                        }
                    }
					
					spin_lock_irq( stheno_queue->queue_lock );
					ret=__blk_end_request_cur(req, 0);
					spin_unlock_irq( stheno_queue->queue_lock );
					if(ret==true) goto next_segment;
                    continue;
                }
                else if( seg_info.dirty )
                {
                    stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) start\n", (int)seg_info.start_sector, (int)seg_info.current_pos);
                    ret = euryale_write_process(seg_info.start_sector, seg_info.current_pos, seg_info.buffer);
                    stheno_printk( KERN_NOTICE "stheno_request: segment write sect(%d) count(%d) end\n", (int)seg_info.start_sector, (int)seg_info.current_pos);
                    seg_info.dirty = 0;
                    if( ret < 0 )
                    {
                        printk( KERN_ERR "stheno_request: segment write sect(%d) count(%d) ERROR\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                        spin_lock_irq( stheno_queue->queue_lock );
						ret=__blk_end_request_cur(req, -EIO);
						spin_unlock_irq( stheno_queue->queue_lock );
						if(ret==true) goto next_segment;
						continue;                                
                    }
                }
            }
            else /* READ */
            {
                if( !seg_info.dirty
                    && blk_rq_cur_sectors(req) < blk_rq_sectors(req) )
                {
                    seg_info.cmd = rq_data_dir( req ); 
                    seg_info.start_sector = blk_rq_pos(req);
                    seg_info.nr_sectors = blk_rq_sectors(req);

                    stheno_printk( KERN_NOTICE "stheno_request: segment read sect(%d) count(%d) start\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                    ret = euryale_read_process(seg_info.start_sector, seg_info.nr_sectors, seg_info.buffer);
                    stheno_printk( KERN_NOTICE "stheno_request: segment read sect(%d) count(%d) end\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                    if( ret < 0 )
                    {
                        printk( KERN_ERR "stheno_request: segment read sect(%d) count(%d) ERROR\n", (int)seg_info.start_sector, (int)seg_info.nr_sectors);
                        spin_lock_irq( stheno_queue->queue_lock );
                        ret = __blk_end_request_cur(req, -EIO);
                        spin_unlock_irq( stheno_queue->queue_lock );
                        if (ret)
                            goto next_segment;
                        continue;
                    }

                    memcpy(req->buffer, seg_info.buffer, blk_rq_cur_sectors(req) * SECT_SIZE);
                    seg_info.current_pos = blk_rq_cur_sectors(req);
                    seg_info.dirty = 1;

                    spin_lock_irq( stheno_queue->queue_lock );
                    ret = __blk_end_request_cur(req, 0);
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if (ret)
                        goto next_segment;
                    continue;
                }
                else if( seg_info.dirty
                    && seg_info.start_sector + seg_info.current_pos == blk_rq_pos(req)
                    && seg_info.nr_sectors - seg_info.current_pos == blk_rq_sectors(req)
                    && seg_info.nr_sectors - seg_info.current_pos >= blk_rq_cur_sectors(req) )
                {
                    memcpy(req->buffer, seg_info.buffer + (seg_info.current_pos * SECT_SIZE), blk_rq_cur_sectors(req) * SECT_SIZE);
                    seg_info.current_pos += blk_rq_cur_sectors(req);
                    if( seg_info.current_pos == seg_info.nr_sectors )
                    {
                        seg_info.dirty = 0;
                    }

                    spin_lock_irq( stheno_queue->queue_lock );
                    ret = __blk_end_request_cur(req, 0);
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if (ret)
                        goto next_segment;
                    continue;
                }
                else
                {
                    seg_info.dirty = 0;
                }
            }

            if( rq_data_dir( req ) == WRITE )
            {
                stheno_printk( KERN_NOTICE "stheno_request: write sect(%d) cur_count(%d) count(%d) start\n", (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                ret = euryale_write_process(blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer);
                stheno_printk( KERN_NOTICE "stheno_request: write sect(%d) cur_count(%d) count(%d) end\n", (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                if( ret < 0 )
                {
                    printk( KERN_ERR "stheno_request: write sect(%d) cur_count(%d) count(%d) ERROR\n", (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                    spin_lock_irq( stheno_queue->queue_lock );
                    ret = __blk_end_request_cur(req, -EIO);
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if (ret)
                        goto next_segment;
                    continue;
                }
            }
            else
            {
                stheno_printk( KERN_NOTICE "stheno_request: read sect(%d) cur_count(%d) count(%d) start\n", (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                ret = euryale_read_process(blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer);
                stheno_printk( KERN_NOTICE "stheno_request: read sect(%d) cur_count(%d) count(%d) end\n", (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                if( ret < 0 )
                {
                    printk( KERN_ERR "stheno_request: read sect(%d) cur_count(%d) count(%d) ERROR\n", (int)blk_rq_pos(req), (int)blk_rq_cur_sectors(req), (int)blk_rq_sectors(req));
                    spin_lock_irq( stheno_queue->queue_lock );
                    ret = __blk_end_request_cur(req, -EIO);
                    spin_unlock_irq( stheno_queue->queue_lock );
                    if (ret)
                        goto next_segment;
                    continue;
                }
            }

            spin_lock_irq( stheno_queue->queue_lock );
            ret = __blk_end_request_cur(req, 0);
            spin_unlock_irq( stheno_queue->queue_lock );
            if (ret)
                goto next_segment;
        } while (1);

    }
    
    return 0;
}
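
Every exit path above repeats the same three-line sequence: take the queue lock, complete the current chunk with __blk_end_request_cur(), drop the lock, then either jump to next_segment or fetch the next request. A minimal sketch of a helper that would factor this out (the name stheno_end_request_cur is ours, not the driver's):

/* Hypothetical helper: complete the current chunk of @req under the queue
 * lock; returns true while the request still has chunks left. */
static bool stheno_end_request_cur(struct request *req, int error)
{
	bool more;

	spin_lock_irq(stheno_queue->queue_lock);
	more = __blk_end_request_cur(req, error);
	spin_unlock_irq(stheno_queue->queue_lock);

	return more;
}

Each error path in the loop would then shrink to: if (stheno_end_request_cur(req, -EIO)) goto next_segment; continue;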
Code example #22
0
File: signal.c  Project: dharamg3/2.6.37-Port
/*
 * OK, we're invoking a handler
 */
static int
handle_signal(unsigned long sig, struct k_sigaction *ka,
	      siginfo_t *info, sigset_t *oldset,
	      struct pt_regs * regs, int syscall)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;
	int usig = sig;
	int ret;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		switch (regs->ARM_r0) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ARM_r0 = -EINTR;
			break;
		case -ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->ARM_r0 = -EINTR;
				break;
			}
			/* fallthrough */
		case -ERESTARTNOINTR:
			setup_syscall_restart(regs);
		}
	}

	/*
	 * translate the signal
	 */
	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		usig = thread->exec_domain->signal_invmap[usig];

	/*
	 * Set up the stack frame
	 */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(usig, ka, info, oldset, regs);
	else
		ret = setup_frame(usig, ka, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	if (ret != 0) {
		force_sigsegv(sig, tsk);
		return ret;
	}

	/*
	 * Block the signal if we were successful.
	 */
	spin_lock_irq(&tsk->sighand->siglock);
	sigorsets(&tsk->blocked, &tsk->blocked,
		  &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&tsk->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&tsk->sighand->siglock);

	return 0;
}
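
For context, handle_signal() is invoked from the architecture's do_signal() once a deliverable signal has been dequeued. A simplified sketch of that call site, assuming the era-appropriate get_signal_to_deliver() API (the surrounding variables are illustrative):

	siginfo_t info;
	struct k_sigaction ka;
	int signr;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* the handler frame is built on the user stack; on failure
		 * handle_signal() has already forced SIGSEGV */
		handle_signal(signr, &ka, &info, &current->blocked,
			      regs, syscall);
	}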
Code example #23
0
File: rtc-mrst.c  Project: 33d/linux-2.6.21-hh20
static int __devinit
vrtc_mrst_do_probe(struct device *dev, struct resource *iomem, int rtc_irq)
{
	int retval = 0;
	unsigned char rtc_control;

	/* There can be only one ... */
	if (mrst_rtc.dev)
		return -EBUSY;

	if (!iomem)
		return -ENODEV;

	iomem = request_mem_region(iomem->start, resource_size(iomem),
				   driver_name);
	if (!iomem) {
		dev_dbg(dev, "i/o mem already in use.\n");
		return -EBUSY;
	}

	mrst_rtc.irq = rtc_irq;
	mrst_rtc.iomem = iomem;
	mrst_rtc.dev = dev;
	dev_set_drvdata(dev, &mrst_rtc);

	mrst_rtc.rtc = rtc_device_register(driver_name, dev,
				&mrst_rtc_ops, THIS_MODULE);
	if (IS_ERR(mrst_rtc.rtc)) {
		retval = PTR_ERR(mrst_rtc.rtc);
		goto cleanup0;
	}

	rename_region(iomem, dev_name(&mrst_rtc.rtc->dev));

	spin_lock_irq(&rtc_lock);
	mrst_irq_disable(&mrst_rtc, RTC_PIE | RTC_AIE);
	rtc_control = vrtc_cmos_read(RTC_CONTROL);
	spin_unlock_irq(&rtc_lock);

	if (!(rtc_control & RTC_24H) || (rtc_control & (RTC_DM_BINARY)))
		dev_dbg(dev, "TODO: support more than 24-hr BCD mode\n");

	if (rtc_irq) {
		retval = request_irq(rtc_irq, mrst_rtc_irq,
				IRQF_DISABLED, dev_name(&mrst_rtc.rtc->dev),
				mrst_rtc.rtc);
		if (retval < 0) {
			dev_dbg(dev, "IRQ %d is already in use, err %d\n",
				rtc_irq, retval);
			goto cleanup1;
		}
	}
	dev_dbg(dev, "initialised\n");
	return 0;

cleanup1:
	rtc_device_unregister(mrst_rtc.rtc);
cleanup0:
	dev_set_drvdata(dev, NULL);
	mrst_rtc.dev = NULL;
	release_mem_region(iomem->start, resource_size(iomem));
	dev_err(dev, "rtc-mrst: unable to initialise\n");
	return retval;
}
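
vrtc_mrst_do_probe() is normally reached from a thin platform-driver wrapper that digs the MMIO resource and IRQ out of the platform device. A sketch under that assumption (the wrapper name and resource indices are illustrative):

static int __devinit vrtc_mrst_platform_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		irq = 0;	/* do_probe() skips IRQ setup when rtc_irq is 0 */

	/* vrtc_mrst_do_probe() itself rejects a NULL iomem with -ENODEV */
	return vrtc_mrst_do_probe(&pdev->dev, res, irq);
}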
Code example #24
0
File: ipath_cq.c  Project: 03199618/linux
/**
 * ipath_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 *
 * Returns 0 for success.
 */
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ipath_cq_wc *old_wc;
	struct ipath_cq_wc *wc;
	u32 head, tail, n;
	int ret;
	u32 sz;

	if (cqe < 1 || cqe > ib_ipath_max_cqes) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
	else
		sz += sizeof(struct ib_wc) * (cqe + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32) cq->ibcq.cqe)
		head = (u32) cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
	for (n = 0; tail != head; n++) {
		if (cq->ip)
			wc->uqueue[n] = old_wc->uqueue[tail];
		else
			wc->kqueue[n] = old_wc->kqueue[tail];
		if (tail == (u32) cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	if (cq->ip) {
		struct ipath_ibdev *dev = to_idev(ibcq->device);
		struct ipath_mmap_info *ip = cq->ip;

		ipath_update_mmap_info(dev, ip, sz, wc);

		/*
		 * Return the offset to mmap.
		 * See ipath_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				goto bail;
		}

		spin_lock_irq(&dev->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = 0;
	goto bail;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(wc);
bail:
	return ret;
}
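
Kernel users do not call ipath_resize_cq() directly; they go through the core verbs wrapper ib_resize_cq(), which dispatches to this driver method. A minimal caller sketch (the surrounding policy is illustrative):

/* Try to grow a CQ; -EINVAL comes back if ncqe is out of range or
 * smaller than the number of entries currently queued. */
static int grow_cq(struct ib_cq *cq, int ncqe)
{
	int ret = ib_resize_cq(cq, ncqe);

	if (ret)
		pr_warn("CQ resize to %d failed: %d\n", ncqe, ret);
	return ret;
}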
Code example #25
0
File: rtc-mrst.c  Project: 33d/linux-2.6.21-hh20
static void rtc_mrst_do_shutdown(void)
{
	spin_lock_irq(&rtc_lock);
	mrst_irq_disable(&mrst_rtc, RTC_IRQMASK);
	spin_unlock_irq(&rtc_lock);
}
Code example #26
0
File: iwctl.c  Project: mbgg/linux
int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
		union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct iw_point *wrq = &wrqu->encoding;
	struct iw_encode_ext *ext = (struct iw_encode_ext*)extra;
	struct viawget_wpa_param *param=NULL;
// original member
	wpa_alg alg_name;
	u8 addr[6];
	int key_idx;
	int set_tx = 0;
	u8 seq[IW_ENCODE_SEQ_MAX_SIZE];
	u8 key[64];
	size_t seq_len = 0;
	size_t key_len = 0;
	u8 *buf;
	u8 key_array[64];
	int ret = 0;

	PRINT_K("SIOCSIWENCODEEXT......\n");

	if (pMgmt == NULL)
		return -EFAULT;

	buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	param = (struct viawget_wpa_param *)buf;

// recover alg_name
	switch (ext->alg) {
	case IW_ENCODE_ALG_NONE:
		alg_name = WPA_ALG_NONE;
		break;
	case IW_ENCODE_ALG_WEP:
		alg_name = WPA_ALG_WEP;
		break;
	case IW_ENCODE_ALG_TKIP:
		alg_name = WPA_ALG_TKIP;
		break;
	case IW_ENCODE_ALG_CCMP:
		alg_name = WPA_ALG_CCMP;
		break;
	default:
		PRINT_K("Unknown alg = %d\n",ext->alg);
		ret= -ENOMEM;
		goto error;
	}
// recover addr
	memcpy(addr, ext->addr.sa_data, ETH_ALEN);
// recover key_idx
	key_idx = (wrq->flags & IW_ENCODE_INDEX) - 1;
// recover set_tx
	if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
		set_tx = 1;
// recover seq,seq_len
	if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
		seq_len = IW_ENCODE_SEQ_MAX_SIZE;
		memcpy(seq, ext->rx_seq, seq_len);
	}
// recover key,key_len
	if (ext->key_len) {
		key_len = ext->key_len;
		memcpy(key, &ext->key[0], key_len);
	}
	memset(key_array, 0, 64);
	if (key_len > 0) {
		memcpy(key_array, key, key_len);
		if (key_len == 32) {
			// note the order: the two 8-byte MIC keys are swapped
			memcpy(&key_array[16], &key[24], 8);
			memcpy(&key_array[24], &key[16], 8);
		}
	}

/**************Translate iw_encode_ext to viawget_wpa_param****************/
	memcpy(param->addr, addr, ETH_ALEN);
	param->u.wpa_key.alg_name = (int)alg_name;
	param->u.wpa_key.set_tx = set_tx;
	param->u.wpa_key.key_index = key_idx;
	param->u.wpa_key.key_len = key_len;
	param->u.wpa_key.key = (u8 *)key_array;
	param->u.wpa_key.seq = (u8 *)seq;
	param->u.wpa_key.seq_len = seq_len;

/**** Track whether this looks like NetworkManager clearing all four key slots in sequence. */
/**** This step-counting heuristic is crude, but there is no cleaner signal available. */
	if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
		if (param->u.wpa_key.key_index == 0) {
			pDevice->bwextstep0 = true;
		}
		if ((pDevice->bwextstep0 == true) && (param->u.wpa_key.key_index == 1)) {
			pDevice->bwextstep0 = false;
			pDevice->bwextstep1 = true;
		}
		if ((pDevice->bwextstep1 == true) && (param->u.wpa_key.key_index == 2)) {
			pDevice->bwextstep1 = false;
			pDevice->bwextstep2 = true;
		}
		if ((pDevice->bwextstep2 == true) && (param->u.wpa_key.key_index == 3)) {
			pDevice->bwextstep2 = false;
			pDevice->bwextstep3 = true;
		}
	}
	if (pDevice->bwextstep3 == true) {
		PRINT_K("SIOCSIWENCODEEXT:Enable WPA WEXT SUPPORT!!!!!\n");
		pDevice->bwextstep0 = false;
		pDevice->bwextstep1 = false;
		pDevice->bwextstep2 = false;
		pDevice->bwextstep3 = false;
		pDevice->bWPASuppWextEnabled = true;
		memset(pMgmt->abyDesireBSSID, 0xFF, 6);
		KeyvInitTable(pDevice, &pDevice->sKey);
	}
/*******/
	spin_lock_irq(&pDevice->lock);
	ret = wpa_set_keys(pDevice, param);
	spin_unlock_irq(&pDevice->lock);

error:
	kfree(buf);
	return ret;
}
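
The 32-byte case above is the TKIP layout: a 16-byte temporal key followed by two 8-byte Michael MIC keys, which the supplicant and this driver order differently. A sketch of that swap in isolation (the helper name and buffers are ours):

/* The 16-byte temporal key stays in place; the two 8-byte Michael
 * MIC keys that follow it are exchanged, exactly as in the code above. */
static void swap_tkip_mic(u8 dst[32], const u8 src[32])
{
	memcpy(dst, src, 16);
	memcpy(dst + 16, src + 24, 8);
	memcpy(dst + 24, src + 16, 8);
}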
Code example #27
0
File: main.c  Project: Excito/compat-wireless
/**
 *  lbs_thread - handles the major jobs in the LBS driver.
 *  It handles all events generated by firmware, RX data received
 *  from firmware and TX data sent from kernel.
 *
 *  @data:	A pointer to &lbs_thread structure
 *  returns:	0
 */
static int lbs_thread(void *data)
{
	struct net_device *dev = data;
	struct lbs_private *priv = dev->ml_priv;
	wait_queue_t wait;

	lbs_deb_enter(LBS_DEB_THREAD);

	init_waitqueue_entry(&wait, current);

	for (;;) {
		int shouldsleep;
		u8 resp_idx;

		lbs_deb_thread("1: currenttxskb %p, dnld_sent %d\n",
				priv->currenttxskb, priv->dnld_sent);

		add_wait_queue(&priv->waitq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irq(&priv->driver_lock);

		if (kthread_should_stop())
			shouldsleep = 0;	/* Bye */
		else if (priv->surpriseremoved)
			shouldsleep = 1;	/* We need to wait until we're _told_ to die */
		else if (priv->psstate == PS_STATE_SLEEP)
			shouldsleep = 1;	/* Sleep mode. Nothing we can do till it wakes */
		else if (priv->cmd_timed_out)
			shouldsleep = 0;	/* Command timed out. Recover */
		else if (!priv->fw_ready)
			shouldsleep = 1;	/* Firmware not ready. We're waiting for it */
		else if (priv->dnld_sent)
			shouldsleep = 1;	/* Something is en route to the device already */
		else if (priv->tx_pending_len > 0)
			shouldsleep = 0;	/* We've a packet to send */
		else if (priv->resp_len[priv->resp_idx])
			shouldsleep = 0;	/* We have a command response */
		else if (priv->cur_cmd)
			shouldsleep = 1;	/* Can't send a command; one already running */
		else if (!list_empty(&priv->cmdpendingq) &&
					!(priv->wakeup_dev_required))
			shouldsleep = 0;	/* We have a command to send */
		else if (kfifo_len(&priv->event_fifo))
			shouldsleep = 0;	/* We have an event to process */
		else
			shouldsleep = 1;	/* No command */

		if (shouldsleep) {
			lbs_deb_thread("sleeping, connect_status %d, "
				"psmode %d, psstate %d\n",
				priv->connect_status,
				priv->psmode, priv->psstate);
			spin_unlock_irq(&priv->driver_lock);
			schedule();
		} else
			spin_unlock_irq(&priv->driver_lock);

		lbs_deb_thread("2: currenttxskb %p, dnld_send %d\n",
			       priv->currenttxskb, priv->dnld_sent);

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&priv->waitq, &wait);

		lbs_deb_thread("3: currenttxskb %p, dnld_sent %d\n",
			       priv->currenttxskb, priv->dnld_sent);

		if (kthread_should_stop()) {
			lbs_deb_thread("break from main thread\n");
			break;
		}

		if (priv->surpriseremoved) {
			lbs_deb_thread("adapter removed; waiting to die...\n");
			continue;
		}

		lbs_deb_thread("4: currenttxskb %p, dnld_sent %d\n",
		       priv->currenttxskb, priv->dnld_sent);

		/* Process any pending command response */
		spin_lock_irq(&priv->driver_lock);
		resp_idx = priv->resp_idx;
		if (priv->resp_len[resp_idx]) {
			spin_unlock_irq(&priv->driver_lock);
			lbs_process_command_response(priv,
				priv->resp_buf[resp_idx],
				priv->resp_len[resp_idx]);
			spin_lock_irq(&priv->driver_lock);
			priv->resp_len[resp_idx] = 0;
		}
		spin_unlock_irq(&priv->driver_lock);

		/* Process hardware events, e.g. card removed, link lost */
		spin_lock_irq(&priv->driver_lock);
		while (kfifo_len(&priv->event_fifo)) {
			u32 event;

			if (kfifo_out(&priv->event_fifo,
				(unsigned char *) &event, sizeof(event)) !=
				sizeof(event))
					break;
			spin_unlock_irq(&priv->driver_lock);
			lbs_process_event(priv, event);
			spin_lock_irq(&priv->driver_lock);
		}
		spin_unlock_irq(&priv->driver_lock);

		if (priv->wakeup_dev_required) {
			lbs_deb_thread("Waking up device...\n");
			/* Wake up device */
			if (priv->exit_deep_sleep(priv))
				lbs_deb_thread("Wakeup device failed\n");
			continue;
		}

		/* command timeout stuff */
		if (priv->cmd_timed_out && priv->cur_cmd) {
			struct cmd_ctrl_node *cmdnode = priv->cur_cmd;

			netdev_info(dev, "Timeout submitting command 0x%04x\n",
				    le16_to_cpu(cmdnode->cmdbuf->command));
			lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
			if (priv->reset_card)
				priv->reset_card(priv);
		}
		priv->cmd_timed_out = 0;

		if (!priv->fw_ready)
			continue;

		/* Check if we need to confirm Sleep Request received previously */
		if (priv->psstate == PS_STATE_PRE_SLEEP &&
		    !priv->dnld_sent && !priv->cur_cmd) {
			if (priv->connect_status == LBS_CONNECTED) {
				lbs_deb_thread("pre-sleep, currenttxskb %p, "
					"dnld_sent %d, cur_cmd %p\n",
					priv->currenttxskb, priv->dnld_sent,
					priv->cur_cmd);

				lbs_ps_confirm_sleep(priv);
			} else {
				/* workaround for firmware sending
				 * deauth/linkloss event immediately
				 * after sleep request; remove this
				 * after firmware fixes it
				 */
				priv->psstate = PS_STATE_AWAKE;
				netdev_alert(dev,
					     "ignore PS_SleepConfirm in non-connected state\n");
			}
		}

		/* The PS state is changed during processing of Sleep Request
		 * event above
		 */
		if ((priv->psstate == PS_STATE_SLEEP) ||
		    (priv->psstate == PS_STATE_PRE_SLEEP))
			continue;

		if (priv->is_deep_sleep)
			continue;

		/* Execute the next command */
		if (!priv->dnld_sent && !priv->cur_cmd)
			lbs_execute_next_command(priv);

		spin_lock_irq(&priv->driver_lock);
		if (!priv->dnld_sent && priv->tx_pending_len > 0) {
			int ret = priv->hw_host_to_card(priv, MVMS_DAT,
							priv->tx_pending_buf,
							priv->tx_pending_len);
			if (ret) {
				lbs_deb_tx("host_to_card failed %d\n", ret);
				priv->dnld_sent = DNLD_RES_RECEIVED;
			} else {
				mod_timer(&priv->tx_lockup_timer,
					  jiffies + (HZ * 5));
			}
			priv->tx_pending_len = 0;
			if (!priv->currenttxskb) {
				/* We can wake the queues immediately if we aren't
				   waiting for TX feedback */
				if (priv->connect_status == LBS_CONNECTED)
					netif_wake_queue(priv->dev);
				if (priv->mesh_dev &&
				    netif_running(priv->mesh_dev))
					netif_wake_queue(priv->mesh_dev);
			}
		}
		spin_unlock_irq(&priv->driver_lock);
	}

	del_timer(&priv->command_timer);
	del_timer(&priv->tx_lockup_timer);
	del_timer(&priv->auto_deepsleep_timer);

	lbs_deb_leave(LBS_DEB_THREAD);
	return 0;
}
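
lbs_thread() runs as a dedicated kthread; the loop exits when kthread_should_stop() turns true. A sketch of how the driver's setup and teardown side would drive it (field names follow struct lbs_private as used above):

	/* At probe time: start the worker that services priv->waitq. */
	priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main");
	if (IS_ERR(priv->main_thread))
		return PTR_ERR(priv->main_thread);

	/* At remove time: kthread_stop() wakes the thread and waits until
	 * lbs_thread() observes kthread_should_stop() and returns. */
	kthread_stop(priv->main_thread);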
Code example #28
0
File: iwctl.c  Project: mbgg/linux
/*
 * Wireless Handler: set scan
 */
int iwctl_siwscan(struct net_device *dev, struct iw_request_info *info,
		union iwreq_data *wrqu, char *extra)
{
	struct vnt_private *pDevice = netdev_priv(dev);
	struct iw_point *wrq = &wrqu->data;
	struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
	struct iw_scan_req *req = (struct iw_scan_req *)extra;
	u8 abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
	PWLAN_IE_SSID pItemSSID = NULL;

	if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
		return -EINVAL;

	PRINT_K(" SIOCSIWSCAN\n");

	if (pMgmt == NULL)
		return -EFAULT;

	if (pMgmt->eScanState ==  WMAC_IS_SCANNING) {
		// A scan is already in progress
		PRINT_K("SIOCSIWSCAN(overlap??)-->In scanning...\n");
		return -EAGAIN;
	}

	if (pDevice->byReAssocCount > 0) { // reject scan when re-associating!
		// send scan event to wpa_Supplicant
		union iwreq_data wrqu;
		PRINT_K("wireless_send_event--->SIOCGIWSCAN(scan done)\n");
		memset(&wrqu, 0, sizeof(wrqu));
		wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL);
		return 0;
	}

	spin_lock_irq(&pDevice->lock);

	BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);

	// mike add: active scan OR passive scan OR desire_ssid scan
	if (wrq->length == sizeof(struct iw_scan_req)) {
		if (wrq->flags & IW_SCAN_THIS_ESSID) { // desire_ssid scan
			memset(abyScanSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
			pItemSSID = (PWLAN_IE_SSID)abyScanSSID;
			pItemSSID->byElementID = WLAN_EID_SSID;
			memcpy(pItemSSID->abySSID, req->essid, (int)req->essid_len);
			if (pItemSSID->abySSID[req->essid_len] == '\0') {
				if (req->essid_len > 0)
					pItemSSID->len = req->essid_len;
			} else {
				pItemSSID->len = req->essid_len;
			}
			pMgmt->eScanType = WMAC_SCAN_PASSIVE;
			PRINT_K("SIOCSIWSCAN:[desired_ssid=%s,len=%d]\n", ((PWLAN_IE_SSID)abyScanSSID)->abySSID,
				((PWLAN_IE_SSID)abyScanSSID)->len);
			bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
			spin_unlock_irq(&pDevice->lock);

			return 0;
		} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { // passive scan
			pMgmt->eScanType = WMAC_SCAN_PASSIVE;
		}
	} else { // active scan
		pMgmt->eScanType = WMAC_SCAN_ACTIVE;
	}

	pMgmt->eScanType = WMAC_SCAN_PASSIVE; /* overrides the active setting above: the command is always issued as a passive scan */
	bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
	spin_unlock_irq(&pDevice->lock);

	return 0;
}
Code example #29
0
File: cdc-acm.c  Project: mb3dot/community-b3-kernel
static int acm_tty_open(struct tty_struct *tty, struct file *filp)
{
	struct acm *acm;
	int rv = -ENODEV;

	mutex_lock(&open_mutex);

	acm = acm_table[tty->index];
	if (!acm || !acm->dev)
		goto out;
	else
		rv = 0;

	dev_dbg(&acm->control->dev, "%s\n", __func__);

	set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);

	tty->driver_data = acm;
	tty_port_tty_set(&acm->port, tty);

	if (usb_autopm_get_interface(acm->control) < 0)
		goto early_bail;
	else
		acm->control->needs_remote_wakeup = 1;

	mutex_lock(&acm->mutex);
	if (acm->port.count++) {
		mutex_unlock(&acm->mutex);
		usb_autopm_put_interface(acm->control);
		goto out;
	}

	acm->ctrlurb->dev = acm->dev;
	if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
		dev_err(&acm->control->dev,
			"%s - usb_submit_urb(ctrl irq) failed\n", __func__);
		goto bail_out;
	}

	if (0 > acm_set_control(acm, acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS) &&
	    (acm->ctrl_caps & USB_CDC_CAP_LINE))
		goto bail_out;

	usb_autopm_put_interface(acm->control);

	/*
	 * Unthrottle device in case the TTY was closed while throttled.
	 */
	spin_lock_irq(&acm->read_lock);
	acm->throttled = 0;
	acm->throttle_req = 0;
	spin_unlock_irq(&acm->read_lock);

	if (acm_submit_read_urbs(acm, GFP_KERNEL))
		goto bail_out;

	set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
	rv = tty_port_block_til_ready(&acm->port, tty, filp);

	mutex_unlock(&acm->mutex);
out:
	mutex_unlock(&open_mutex);
	return rv;

bail_out:
	acm->port.count--;
	mutex_unlock(&acm->mutex);
	usb_autopm_put_interface(acm->control);
early_bail:
	mutex_unlock(&open_mutex);
	tty_port_tty_set(&acm->port, NULL);
	return -EIO;
}