Example #1
0
/**
 * mipv6_halist_add - Add new home agent to the Home Agents List
 * @ifindex: interface identifier
 * @glob_addr: home agent's global address
 * @ll_addr: home agent's link-local address
 * @pref: relative preference for this home agent
 * @lifetime: lifetime for the entry
 *
 * Adds a new home agent to the Home Agents List.  The list is
 * interface-specific, and @ifindex tells through which interface the
 * home agent was heard.  Returns zero on success and negative on failure.
 **/
int mipv6_halist_add(int ifindex, struct in6_addr *glob_addr,
		     struct in6_addr *ll_addr, int pref, __u32 lifetime)
{
	int update = 0, ret = 0;
	long mpref;
	struct mipv6_halist_entry *entry = NULL;
	unsigned long flags;

	DEBUG_FUNC();

	write_lock_irqsave(&home_agents->lock, flags);

	if (glob_addr == NULL || lifetime == 0) {
		DEBUG((DBG_WARNING, "mipv6_halist_add: invalid arguments"));
		write_unlock_irqrestore(&home_agents->lock, flags);
		return -1;
	}
	mpref = PREF_BASE - pref;
	if ((entry = hashlist_get(home_agents->entries, glob_addr)) != NULL) {
		if (entry->ifindex == ifindex) {
			DEBUG((DBG_DATADUMP, "mipv6_halist_add: updating old entry"));
			update = 1;
		} else {
			update = 0;
		}
	}
	if (update) {
		entry->expire = jiffies + lifetime * HZ;
		if (entry->preference != mpref) {
			entry->preference = mpref;
			ret = hashlist_reschedule(home_agents->entries, glob_addr, mpref);
		}
	} else {
		entry = mipv6_halist_new_entry();
		if (entry == NULL) {
			DEBUG((DBG_INFO, "mipv6_halist_add: list full"));
			write_unlock_irqrestore(&home_agents->lock, flags);
			return -1;
		}
		entry->ifindex = ifindex;
		if (ll_addr)
			ipv6_addr_copy(&entry->link_local_addr, ll_addr);
		else
			ipv6_addr_set(&entry->link_local_addr, 0, 0, 0, 0);
		ipv6_addr_copy(&entry->global_addr, glob_addr);
		entry->preference = mpref;
		entry->expire = jiffies + lifetime * HZ;
		ret = hashlist_add(home_agents->entries, glob_addr, mpref, entry);
	}
	write_unlock_irqrestore(&home_agents->lock, flags);

	return ret;
}
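/*
 * A minimal sketch of the same "update or insert under one write lock"
 * pattern, using a plain list walk in place of the hashlist_* helpers
 * above.  All names here (ha_entry, ha_list, ha_list_add) are
 * illustrative, not part of the MIPv6 code; assumes <linux/list.h>,
 * <linux/spinlock.h> and <net/ipv6.h>.
 */
struct ha_entry {
	struct list_head list;
	struct in6_addr addr;
	int ifindex;
	unsigned long expire;
};

static LIST_HEAD(ha_list);
static DEFINE_RWLOCK(ha_lock);

static int ha_list_add(int ifindex, const struct in6_addr *addr, __u32 lifetime)
{
	struct ha_entry *e;
	unsigned long flags;

	write_lock_irqsave(&ha_lock, flags);
	list_for_each_entry(e, &ha_list, list) {
		if (e->ifindex == ifindex && ipv6_addr_equal(&e->addr, addr)) {
			e->expire = jiffies + lifetime * HZ;	/* refresh existing entry */
			write_unlock_irqrestore(&ha_lock, flags);
			return 0;
		}
	}

	e = kmalloc(sizeof(*e), GFP_ATOMIC);	/* atomic: lock held, irqs off */
	if (!e) {
		write_unlock_irqrestore(&ha_lock, flags);
		return -ENOMEM;
	}
	e->addr = *addr;
	e->ifindex = ifindex;
	e->expire = jiffies + lifetime * HZ;
	list_add(&e->list, &ha_list);
	write_unlock_irqrestore(&ha_lock, flags);

	return 0;
}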
Example #2
0
static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
						   ptp_info);
	unsigned long flags;

	write_lock_irqsave(&tstamp->lock, flags);
	timecounter_adjtime(&tstamp->clock, delta);
	write_unlock_irqrestore(&tstamp->lock, flags);

	return 0;
}
Example #3
0
void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
{
	struct drm_psb_private *dev_priv = psb_priv(dev);
	struct ttm_fence_device *fdev = &dev_priv->fdev;
	struct ttm_fence_class_manager *fc =
				&fdev->fence_class[fence_class];
	unsigned long irq_flags;

	write_lock_irqsave(&fc->lock, irq_flags);
	psb_fence_poll(fdev, fence_class, fc->waiting_types);
	write_unlock_irqrestore(&fc->lock, irq_flags);
}
Example #4
0
static void mlx5e_timestamp_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
						   overflow_work);
	unsigned long flags;

	write_lock_irqsave(&tstamp->lock, flags);
	timecounter_read(&tstamp->clock);
	write_unlock_irqrestore(&tstamp->lock, flags);
	schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
}
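/*
 * Minimal sketch of the self-rescheduling overflow watchdog above, with
 * made-up names (tc_state, tc_overflow_work).  timecounter_read() both
 * returns the time and folds elapsed cycles into the counter state, so
 * it must run under the write lock and at least once per counter wrap.
 */
struct tc_state {
	rwlock_t lock;
	struct cyclecounter cc;
	struct timecounter tc;
	struct delayed_work overflow_work;
	unsigned long overflow_period;		/* jiffies between checks */
};

static void tc_overflow_work(struct work_struct *work)
{
	struct tc_state *s = container_of(to_delayed_work(work),
					  struct tc_state, overflow_work);
	unsigned long flags;

	write_lock_irqsave(&s->lock, flags);
	timecounter_read(&s->tc);
	write_unlock_irqrestore(&s->lock, flags);

	schedule_delayed_work(&s->overflow_work, s->overflow_period);
}

/* Set up once at init time, e.g.:
 *	INIT_DELAYED_WORK(&s->overflow_work, tc_overflow_work);
 *	schedule_delayed_work(&s->overflow_work, 0);
 */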
Example #5
0
/**
 * mlx4_en_phc_adjtime - Shift the time of the hardware clock
 * @ptp: ptp clock structure
 * @delta: Desired change in nanoseconds
 *
 * Adjust the timer by resetting the timecounter structure.
 **/
static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
    struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
                                            ptp_clock_info);
    unsigned long flags;

    write_lock_irqsave(&mdev->clock_lock, flags);
    timecounter_adjtime(&mdev->clock, delta);
    write_unlock_irqrestore(&mdev->clock_lock, flags);

    return 0;
}
Example #6
0
static int
zfcp_statistics_clear(struct list_head *head)
{
	int retval = 0;
	unsigned long flags;
	struct zfcp_statistics *stat, *tmp;

	write_lock_irqsave(&zfcp_data.stat_lock, flags);
	list_for_each_entry_safe(stat, tmp, head, list) {
		list_del(&stat->list);
		kfree(stat);
	}
	write_unlock_irqrestore(&zfcp_data.stat_lock, flags);

	return retval;
}
Example #7
0
/*************** primitives for use in any context *********************/
static inline uint8_t hp_sdc_status_in8(void)
{
	uint8_t status;
	unsigned long flags;

	write_lock_irqsave(&hp_sdc.ibf_lock, flags);
	status = sdc_readb(hp_sdc.status_io);
	if (!(status & HP_SDC_STATUS_IBF))
		hp_sdc.ibf = 0;
	write_unlock_irqrestore(&hp_sdc.ibf_lock, flags);

	return status;
}
Example #8
0
// __stp_tf_map_initialize():  Initialize the free list.  Grabs the
// lock.
static void
__stp_tf_map_initialize(void)
{
	int i;
	struct hlist_head *head = &__stp_tf_map_free_list[0];

	unsigned long flags;
	write_lock_irqsave(&__stp_tf_map_lock, flags);
	for (i = 0; i < TASK_FINDER_MAP_ENTRY_ITEMS; i++) {
		hlist_add_head(&__stp_tf_map_free_list_items[i].hlist, head);
	}
	write_unlock_irqrestore(&__stp_tf_map_lock, flags);
}
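/*
 * Hypothetical companion to the initializer above (not from the original
 * module): taking one entry off the free list under the same write lock.
 * The entry type name is assumed.
 */
static struct __stp_tf_map_entry *
__stp_tf_map_get_free_entry(void)
{
	struct hlist_head *head = &__stp_tf_map_free_list[0];
	struct __stp_tf_map_entry *entry = NULL;
	unsigned long flags;

	write_lock_irqsave(&__stp_tf_map_lock, flags);
	if (!hlist_empty(head)) {
		entry = hlist_entry(head->first,
				    struct __stp_tf_map_entry, hlist);
		hlist_del_init(&entry->hlist);
	}
	write_unlock_irqrestore(&__stp_tf_map_lock, flags);

	return entry;
}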
Example #9
0
void insert_sysinfo_snapshot(struct sysinfo_snapshot *target)
{
	unsigned long flags;

	write_lock_irqsave(&sysinfo_snapshot_lock, flags);
	if (snapshot_head == NULL) {
		snapshot_head = target;
	} else {
		snapshot_tail->next = target;
	}
	snapshot_tail = target;
	write_unlock_irqrestore(&sysinfo_snapshot_lock, flags);
	return;
}
Example #10
0
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
{
	bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
					      mdev->overflow_period);
	unsigned long flags;

	if (timeout) {
		write_lock_irqsave(&mdev->clock_lock, flags);
		timecounter_read(&mdev->clock);
		write_unlock_irqrestore(&mdev->clock_lock, flags);
		mdev->last_overflow_check = jiffies;
	}
}
Example #11
0
void mlx5e_ptp_overflow_check(struct mlx5e_priv *priv)
{
	bool timeout = time_is_before_jiffies(priv->tstamp.last_overflow_check +
					      priv->tstamp.overflow_period);
	unsigned long flags;

	if (timeout) {
		write_lock_irqsave(&priv->tstamp.lock, flags);
		timecounter_read(&priv->tstamp.clock);
		write_unlock_irqrestore(&priv->tstamp.lock, flags);
		priv->tstamp.last_overflow_check = jiffies;
	}
}
Example #12
0
void rxe_pool_cleanup(struct rxe_pool *pool)
{
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	pool->state = RXE_POOL_STATE_INVALID;
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool_name(pool));
	write_unlock_irqrestore(&pool->pool_lock, flags);

	rxe_pool_put(pool);
}
Example #13
0
/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	list_add_tail(&ccp->entry, &ccp_units);
	if (!ccp_rr)
		/* We already have the list lock (we're first) so this
		 * pointer can't change on us. Set its initial value.
		 */
		ccp_rr = ccp;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}
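/*
 * Hedged sketch of a matching consumer for the list above: advancing the
 * round-robin pointer modifies shared state, so it also takes the write
 * lock.  This is illustrative only, not the driver's actual lookup
 * routine.
 */
static struct ccp_device *ccp_round_robin_get(void)
{
	struct ccp_device *ccp = NULL;
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	if (ccp_rr) {
		ccp = ccp_rr;
		/* advance, wrapping from the list tail back to the head */
		ccp_rr = list_next_entry(ccp_rr, entry);
		if (&ccp_rr->entry == &ccp_units)
			ccp_rr = list_first_entry(&ccp_units,
						  struct ccp_device, entry);
	}
	write_unlock_irqrestore(&ccp_unit_lock, flags);

	return ccp;
}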
Example #14
0
/**
 * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery
 * @port: zfcp_port whose fc_rport we should try to unblock
 */
static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
{
	unsigned long flags;
	struct zfcp_adapter *adapter = port->adapter;
	int port_status;
	struct Scsi_Host *shost = adapter->scsi_host;
	struct scsi_device *sdev;

	write_lock_irqsave(&adapter->erp_lock, flags);
	port_status = atomic_read(&port->status);
	if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED)    == 0 ||
	    (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE |
			    ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) {
		/* new ERP of severity >= port triggered elsewhere meanwhile or
		 * local link down (adapter erp_failed but not clear unblock)
		 */
		zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action);
		write_unlock_irqrestore(&adapter->erp_lock, flags);
		return;
	}
	spin_lock(shost->host_lock);
	__shost_for_each_device(sdev, shost) {
		struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
		int lun_status;

		if (zsdev->port != port)
			continue;
		/* LUN under port of interest */
		lun_status = atomic_read(&zsdev->status);
		if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
			continue; /* unblock rport despite failed LUNs */
		/* LUN recovery not given up yet [maybe follow-up pending] */
		if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
		    (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) {
			/* LUN blocked:
			 * not yet unblocked [LUN recovery pending]
			 * or meanwhile blocked [new LUN recovery triggered]
			 */
			zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action);
			spin_unlock(shost->host_lock);
			write_unlock_irqrestore(&adapter->erp_lock, flags);
			return;
		}
	}
	/* now port has no child or all children have completed recovery,
	 * and no ERP of severity >= port was meanwhile triggered elsewhere
	 */
	zfcp_scsi_schedule_rport_register(port);
	spin_unlock(shost->host_lock);
	write_unlock_irqrestore(&adapter->erp_lock, flags);
}
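/*
 * Condensed sketch of the nesting discipline above: the outer rwlock is
 * taken with irqsave, the inner spinlock is nested inside it, and every
 * exit path releases them in the reverse order.  The locks and the
 * blocked() predicate are placeholders.
 */
static void nested_lock_example(rwlock_t *outer, spinlock_t *inner,
				bool (*blocked)(void *arg), void *arg)
{
	unsigned long flags;

	write_lock_irqsave(outer, flags);
	spin_lock(inner);

	if (blocked(arg)) {
		/* early out: inner first, then outer */
		spin_unlock(inner);
		write_unlock_irqrestore(outer, flags);
		return;
	}

	/* ... act on arg while both locks are held ... */

	spin_unlock(inner);
	write_unlock_irqrestore(outer, flags);
}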
Example #15
0
/*!
******************************************************************************

 @Function	LinuxEventObjectAdd
 
 @Description 
 
 Linux wait object addition

 @Input    hOSEventObjectList : Event object list handle 
 @Output   phOSEventObject : Pointer to the event object handle 
 
 @Return   PVRSRV_ERROR  :  Error code

******************************************************************************/
PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject)
{
	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject; 
	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; 
	IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
	PVRSRV_PER_PROCESS_DATA *psPerProc;
	unsigned long ulLockFlags;

	psPerProc = PVRSRVPerProcessData(ui32PID);
	if (psPerProc == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: Couldn't find per-process data"));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	/* allocate completion variable */
	if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT), 
		(IMG_VOID **)&psLinuxEventObject, IMG_NULL,
		"Linux Event Object") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory "));		
		return PVRSRV_ERROR_OUT_OF_MEMORY;	
	}
	
	INIT_LIST_HEAD(&psLinuxEventObject->sList);

	atomic_set(&psLinuxEventObject->sTimeStamp, 0);
	psLinuxEventObject->ui32TimeStampPrevious = 0;

#if defined(DEBUG)
	psLinuxEventObject->ui32Stats = 0;
#endif
    init_waitqueue_head(&psLinuxEventObject->sWait);

	psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;

	psLinuxEventObject->hResItem = ResManRegisterRes(psPerProc->hResManContext,
													 RESMAN_TYPE_EVENT_OBJECT,
													 psLinuxEventObject,
													 0,
													 &LinuxEventObjectDeleteCallback);	

	write_lock_irqsave(&psLinuxEventObjectList->sLock, ulLockFlags);
	list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
	write_unlock_irqrestore(&psLinuxEventObjectList->sLock, ulLockFlags);
	
	*phOSEventObject = psLinuxEventObject;

	return PVRSRV_OK;	 
}
Example #16
0
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	panic("Disabled");
#if 0 // AKAROS_PORT
	struct mlx4_dev *dev = mdev->dev;
	unsigned long flags;
	uint64_t ns, zero = 0;

	rwlock_init(&mdev->clock_lock);

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
	/* Using shift to make calculation more accurate. Since current HW
	 * clock frequency is 427 MHz, and cycles are given using a 48 bits
	 * register, the biggest shift when calculating using u64, is 14
	 * (max_cycles * multiplier < 2^64)
	 */
	mdev->cycles.shift = 14;
	mdev->cycles.mult =
		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
	mdev->nominal_c_mult = mdev->cycles.mult;

	write_lock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles,
			 epoch_nsec());
	write_unlock_irqrestore(&mdev->clock_lock, flags);

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least once every wrap around.
	 */
	ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
	do_div(ns, NSEC_PER_SEC / 2 / HZ);
	mdev->overflow_period = ns;

	/* Configure the PHC */
	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");

	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
					     &mdev->pdev->dev);
	if (IS_ERR(mdev->ptp_clock)) {
		mdev->ptp_clock = NULL;
		mlx4_err(mdev, "ptp_clock_register failed\n");
	} else {
		mlx4_info(mdev, "registered PHC clock\n");
	}

#endif
}
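/*
 * Sanity-check sketch for the shift choice explained in the comment
 * above: with a 48-bit cycle counter, mult must satisfy mask * mult
 * < 2^64 so the u64 product in cyclecounter_cyc2ns() cannot overflow.
 * The helper name and the 427 MHz / shift 14 figures come from the
 * comment and are used here only as an example.
 */
static bool mlx4_shift_is_safe(u32 core_clock_khz, u32 shift)
{
	u64 mask = CLOCKSOURCE_MASK(48);
	u64 mult = clocksource_khz2mult(core_clock_khz, shift);

	/* overflow-free iff mult <= (2^64 - 1) / mask */
	return mult != 0 && mult <= div64_u64(U64_MAX, mask);
}

/* e.g. mlx4_shift_is_safe(427000, 14) holds for a 427 MHz clock. */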
Example #17
0
void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
{
    unsigned long flags;
    struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
    struct zfcp_port *port = zfcp_sdev->port;
    struct zfcp_adapter *adapter = port->adapter;
    int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;

    write_lock_irqsave(&adapter->erp_lock, flags);
    _zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF);
    write_unlock_irqrestore(&adapter->erp_lock, flags);

    zfcp_erp_wait(adapter);
}
Example #18
0
static int mlx5e_ptp_settime(struct ptp_clock_info *ptp,
			     const struct timespec64 *ts)
{
	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
						   ptp_info);
	u64 ns = timespec64_to_ns(ts);
	unsigned long flags;

	write_lock_irqsave(&tstamp->lock, flags);
	timecounter_init(&tstamp->clock, &tstamp->cycles, ns);
	write_unlock_irqrestore(&tstamp->lock, flags);

	return 0;
}
Example #19
0
//******************************************************************************
// Function:   iiWaitForTxEmptyII(pB, mSdelay)
// Parameters: pB      - pointer to board structure
//             mSdelay - period to wait before returning
//
// Returns:    True if the FIFO is empty.
//             False if it is not empty in the required time: the pB->i2eError
//             field has the error.
//
// Description:
//
// Waits up to "mSdelay" milliseconds for the outgoing FIFO to become empty; if
// not empty by the required time, returns false and error in pB->i2eError,
// otherwise returns true.
//
// mSdelay == 0 is taken to mean must be empty on the first test.
//
// This version operates on IntelliPort-II-style FIFOs.
//
// Note this routine is organized so that if status is ok there is no delay at
// all, either before or after the test.  It is called indirectly through
// pB->i2eWaitForTxEmpty.
//
//******************************************************************************
static int
iiWaitForTxEmptyII(i2eBordStrPtr pB, int mSdelay)
{
	unsigned long	flags;
	int itemp;

	for (;;)
	{
		// This routine hinges on being able to see the "other" status register
		// (as seen by the local processor).  His incoming fifo is our outgoing
		// FIFO.
		//
		// By the nature of this routine, you would be using this as part of a
		// larger atomic context: i.e., you would use this routine to ensure the
		// fifo empty, then act on this information. Between these two halves, 
		// you will generally not want to service interrupts or in any way 
		// disrupt the assumptions implicit in the larger context.
		//
		// Even worse, however, this routine "shifts" the status register to 
		// point to the local status register which is not the usual situation.
		// Therefore for extra safety, we force the critical section to be
		// completely atomic, and pick up after ourselves before allowing any
		// interrupts of any kind.


		write_lock_irqsave(&Dl_spinlock, flags);
		outb(SEL_COMMAND, pB->i2ePointer);
		outb(SEL_CMD_SH, pB->i2ePointer);

		itemp = inb(pB->i2eStatus);

		outb(SEL_COMMAND, pB->i2ePointer);
		outb(SEL_CMD_UNSH, pB->i2ePointer);

		if (itemp & ST_IN_EMPTY)
		{
			I2_UPDATE_FIFO_ROOM(pB);
			write_unlock_irqrestore(&Dl_spinlock, flags);
			I2_COMPLETE(pB, I2EE_GOOD);
		}

		write_unlock_irqrestore(&Dl_spinlock, flags);

		if (mSdelay-- == 0)
			break;

		iiDelay(pB, 1);      /* 1 mS granularity on checking condition */
	}
	I2_COMPLETE(pB, I2EE_TXE_TIME);
}
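/*
 * Generic sketch of the polling shape above: each probe of the hardware
 * is its own short write_lock_irqsave() critical section, and the
 * millisecond delay happens with the lock dropped.  The lock, check_hw()
 * and its argument are placeholders.
 */
static bool poll_until_empty(rwlock_t *lock, bool (*check_hw)(void *hw),
			     void *hw, int timeout_ms)
{
	unsigned long flags;
	bool done;

	for (;;) {
		write_lock_irqsave(lock, flags);
		done = check_hw(hw);		/* touch the registers atomically */
		write_unlock_irqrestore(lock, flags);

		if (done)
			return true;
		if (timeout_ms-- == 0)
			return false;		/* caller records the timeout error */

		mdelay(1);			/* 1 ms granularity, lock not held */
	}
}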
Example #20
0
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	struct mlx4_dev *dev = mdev->dev;
	unsigned long flags;
	u64 ns;

	/* mlx4_en_init_timestamp is called for each netdev.
	 * mdev->ptp_clock is common for all ports; skip initialization if it
	 * was already done for another port.
	 */
	if (mdev->ptp_clock)
		return;

	rwlock_init(&mdev->clock_lock);

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
	mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
	mdev->cycles.mult =
		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
	mdev->nominal_c_mult = mdev->cycles.mult;

	write_lock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles,
			 ktime_to_ns(ktime_get_real()));
	write_unlock_irqrestore(&mdev->clock_lock, flags);

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least once every wrap around.
	 */
	ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
	do_div(ns, NSEC_PER_SEC / 2 / HZ);
	mdev->overflow_period = ns;

	/* Configure the PHC */
	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");

	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
					     &mdev->pdev->dev);
	if (IS_ERR(mdev->ptp_clock)) {
		mdev->ptp_clock = NULL;
		mlx4_err(mdev, "ptp_clock_register failed\n");
	} else {
		mlx4_info(mdev, "registered PHC clock\n");
	}

}
Example #21
0
int
EvGetMemberName(EvGroupID_t groupID, EvGroupID_t memberID, char *memberName)
{
    EvGroupInfo_t *EGroup;
    EvMemberList_t *Member;
    unsigned long Flags;

    write_lock_irqsave(&EvGroupLock, Flags);
    if ((EGroup = EvGetGroupBase(groupID)) == NULL) {
        write_unlock_irqrestore(&EvGroupLock, Flags);
        return -EV_ERROR_GROUP_EXIST;
    }

    write_lock(&EGroup->EgiLock);
    write_unlock(&EvGroupLock);

    /* If this is a member and not a master group controller. */
    if (EGroup->EgiType == EG_MEMBER) {
        /* If the member is this group member then localize. */
        if (EGroup->EgiMemberID == memberID) {
            strncpy(memberName, EGroup->EgiMemberName, 16);
            write_unlock_irqrestore(&EGroup->EgiLock, Flags);
            return EV_NOERR;
        }

        return EvSendGetMemberName(EGroup, memberID, memberName, Flags);
    }

    if (EGroup->EgiMemberID == memberID) {
        strncpy(memberName, EGroup->EgiMemberName, 16);
        write_unlock_irqrestore(&EGroup->EgiLock, Flags);
        return EV_NOERR;
    }

    Member = EGroup->EgiMembers;

    while (Member) {
        if (Member->EslID == memberID) {
            strncpy(memberName, Member->EslName, 16);
            write_unlock_irqrestore(&EGroup->EgiLock, Flags);
            return EV_NOERR;
        }
        Member = Member->EslNext;
    }

    write_unlock_irqrestore(&EGroup->EgiLock, Flags);

    return -EV_ERROR_MEMBER_EXIST;
}
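/*
 * Sketch of the lock hand-off used above, with placeholder locks:
 * interrupts stay disabled from the outer write_lock_irqsave() until the
 * saved flags are restored together with the inner unlock, even though
 * the outer lock itself is dropped early with a plain write_unlock().
 */
static void lock_handoff(rwlock_t *table_lock, rwlock_t *obj_lock)
{
	unsigned long flags;

	write_lock_irqsave(table_lock, flags);	/* irqs off from here            */
	write_lock(obj_lock);			/* pin the object                */
	write_unlock(table_lock);		/* release table, irqs still off */

	/* ... operate on the object ... */

	write_unlock_irqrestore(obj_lock, flags);	/* irqs back on here     */
}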
Example #22
0
/**
 * serial_open - open serial device
 * @tty: tty device
 * @filp: file structure
 *
 * Called to open serial device.
 */
static int serial_open (struct tty_struct *tty, struct file *filp)
{
	unsigned long flags;
	int n = 0, rc = 0;
	struct serproto_dev *device = NULL;

	dbg_oc (3, "tty #%p file #%p", tty, filp);

	if (NULL == tty || 0 > (n = MINOR (tty->device) - tty->driver.minor_start) ||
	    n >= serproto_devices || NULL == (device = serproto_device_array[n])) {
		dbg_oc (1, "FAIL ENODEV");
		return -ENODEV;
	}

	MOD_INC_USE_COUNT;
	dbg_init (1, "OPEN uc=%d", GET_USE_COUNT (THIS_MODULE));
	write_lock_irqsave (&device->rwlock, flags);

	if (1 == ++device->opencnt) {
		// First open
		tty->driver_data = device;
		device->tty = tty;
		/* Force low_latency on so that our tty_push actually forces
		 * the data through; otherwise it is scheduled, and with high
		 * data rates (like with OHCI) data can get lost.
		 */
		tty->low_latency = 1;

	} else if (tty->driver_data != device || device->tty != tty) {
		// Second or later open, different tty/device combo
		rc = -EBUSY;
	}
	// XXX Should extract info from somewhere to see if receive is OK
	write_unlock_irqrestore (&device->rwlock, flags);

	if (0 != rc) {
		if (-EBUSY == rc) {
			dbg_oc (1, "2nd, conflict: old dev #%p new #%p, old tty #%p new #%p",
				tty->driver_data, device, device->tty, tty);
		}
		MOD_DEC_USE_COUNT;
		dbg_init (0, "OPEN rc=%d uc=%d", rc, GET_USE_COUNT (THIS_MODULE));
	}
	dbg_oc (3, "->%d n=%d", rc, n);
	return (rc);
}
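/*
 * Hypothetical matching close path for the open-count handling above
 * (not part of the original driver): the count drops under the same
 * rwlock, and the tty/device association is torn down only on the last
 * close.
 */
static void serial_close(struct tty_struct *tty, struct file *filp)
{
	struct serproto_dev *device = tty->driver_data;
	unsigned long flags;

	if (NULL == device)
		return;

	write_lock_irqsave(&device->rwlock, flags);
	if (0 == --device->opencnt) {
		device->tty = NULL;
		tty->driver_data = NULL;
	}
	write_unlock_irqrestore(&device->rwlock, flags);

	MOD_DEC_USE_COUNT;
}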
Example #23
0
static int bfusb_close(struct hci_dev *hdev)
{
	struct bfusb_data *data = hci_get_drvdata(hdev);
	unsigned long flags;

	BT_DBG("hdev %p bfusb %p", hdev, data);

	write_lock_irqsave(&data->lock, flags);
	write_unlock_irqrestore(&data->lock, flags);

	bfusb_unlink_urbs(data);
	bfusb_flush(hdev);

	return 0;
}
Example #24
0
/**
 * mlx4_en_phc_settime - Set the current time on the hardware clock
 * @ptp: ptp clock structure
 * @ts: timespec containing the new time for the cycle counter
 *
 * Reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 **/
static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
			       const struct timespec64 *ts)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	u64 ns = timespec64_to_ns(ts);
	unsigned long flags;

	/* reset the timecounter */
	write_lock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles, ns);
	write_unlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}
Example #25
0
static enum MHI_STATUS process_m3_transition(
		struct mhi_device_ctxt *mhi_dev_ctxt,
		enum STATE_TRANSITION cur_work_item)
{
	unsigned long flags;
	mhi_log(MHI_MSG_INFO,
			"Processing M3 state transition\n");
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
	mhi_dev_ctxt->flags.pending_M3 = 0;
	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m3_event);
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->counters.m0_m3++;
	return MHI_STATUS_SUCCESS;
}
Example #26
0
/**
 * mlx4_en_phc_adjtime - Shift the time of the hardware clock
 * @ptp: ptp clock structure
 * @delta: Desired change in nanoseconds
 *
 * Adjust the timer by resetting the timecounter structure.
 **/
static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	unsigned long flags;
	s64 now;

	write_lock_irqsave(&mdev->clock_lock, flags);
	now = timecounter_read(&mdev->clock);
	now += delta;
	timecounter_init(&mdev->clock, &mdev->cycles, now);
	write_unlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}
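/*
 * The two adjtime variants in this collection are equivalent: where
 * timecounter_adjtime() is not available, the same shift can be done by
 * re-seeding the timecounter, as in this generic sketch (names are
 * placeholders).
 */
static void tc_adjtime_fallback(rwlock_t *lock, struct timecounter *tc,
				const struct cyclecounter *cc, s64 delta)
{
	unsigned long flags;
	u64 now;

	write_lock_irqsave(lock, flags);
	now = timecounter_read(tc);		/* current time in ns        */
	timecounter_init(tc, cc, now + delta);	/* re-seed, shifted by delta */
	write_unlock_irqrestore(lock, flags);
}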
Example #27
0
void psb_fence_error(struct drm_device *dev,
		     uint32_t fence_class,
		     uint32_t sequence, uint32_t type, int error)
{
	struct drm_psb_private *dev_priv = psb_priv(dev);
	struct ttm_fence_device *fdev = &dev_priv->fdev;
	unsigned long irq_flags;
	struct ttm_fence_class_manager *fc =
				&fdev->fence_class[fence_class];

	BUG_ON(fence_class >= PSB_NUM_ENGINES);
	write_lock_irqsave(&fc->lock, irq_flags);
	ttm_fence_handler(fdev, fence_class, sequence, type, error);
	write_unlock_irqrestore(&fc->lock, irq_flags);
}
Example #28
0
/**
 * mlx4_en_phc_gettime - Reads the current time from the hardware clock
 * @ptp: ptp clock structure
 * @ts: timespec structure to hold the current time value
 *
 * Read the timecounter and return the correct value in ns after converting
 * it into a struct timespec.
 **/
static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp,
			       struct timespec64 *ts)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	unsigned long flags;
	u64 ns;

	write_lock_irqsave(&mdev->clock_lock, flags);
	ns = timecounter_read(&mdev->clock);
	write_unlock_irqrestore(&mdev->clock_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}
Example #29
0
static void i915_fence_flush(struct drm_device *dev,
			     uint32_t fence_class)
{
	struct drm_i915_private *dev_priv = 
		(struct drm_i915_private *) dev->dev_private;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &fm->fence_class[0];
	unsigned long irq_flags;

	if (unlikely(!dev_priv))
		return;

	write_lock_irqsave(&fm->lock, irq_flags);
	i915_initiate_rwflush(dev_priv, fc);
	write_unlock_irqrestore(&fm->lock, irq_flags);
}
Example #30
0
void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
{
    unsigned long flags;

    zfcp_erp_adapter_block(adapter, clear);
    zfcp_scsi_schedule_rports_block(adapter);

    write_lock_irqsave(&adapter->erp_lock, flags);
    if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
        zfcp_erp_set_adapter_status(adapter,
                                    ZFCP_STATUS_COMMON_ERP_FAILED);
    else
        zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
                                NULL, NULL, id, 0);
    write_unlock_irqrestore(&adapter->erp_lock, flags);
}