/*************** primitives for use in any context *********************/

static inline uint8_t hp_sdc_status_in8(void)
{
	uint8_t status;
	unsigned long flags;

	write_lock_irqsave(&hp_sdc.ibf_lock, flags);
	status = sdc_readb(hp_sdc.status_io);
	if (!(status & HP_SDC_STATUS_IBF))
		hp_sdc.ibf = 0;
	write_unlock_irqrestore(&hp_sdc.ibf_lock, flags);

	return status;
}
OS_VOID OS_RWLockWriteRelease( OS_RWLOCK* aLock, OS_RWLOCK_FLAG* aFlag )
{
	rwlock_t* lRWLock = ( rwlock_t * ) aLock->Space;

#ifdef OS_TAG_ENABLED
	/* The lock must carry a tag while held; clear it on release. */
	OS_ASSERT( aLock->Tag != OS_TAG_NOTAG );
	aLock->Tag = OS_TAG_NOTAG;
#endif // OS_TAG_ENABLED

	write_unlock_irqrestore( lRWLock, ( unsigned long ) *aFlag );
}
/**
 * cpu_pm_unregister_notifier - unregister a driver with cpu_pm
 * @nb: notifier block to be unregistered
 *
 * Remove a driver from the CPU PM notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * raw_notifier_chain_unregister.
 */
int cpu_pm_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	write_lock_irqsave(&cpu_pm_notifier_lock, flags);
	ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
	write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);

	return ret;
}
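/*
 * A minimal sketch of how a driver might use this API, pairing
 * cpu_pm_register_notifier() with the unregister call above. The callback
 * name and its save/restore steps are illustrative assumptions, not taken
 * from any in-tree driver.
 */
static int my_cpu_pm_callback(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* Save per-CPU hardware context before a low-power state. */
		break;
	case CPU_PM_EXIT:
		/* Restore context after leaving the low-power state. */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_pm_nb = {
	.notifier_call = my_cpu_pm_callback,
};

/* Registration at probe time, teardown at remove time:
 *	cpu_pm_register_notifier(&my_cpu_pm_nb);
 *	...
 *	cpu_pm_unregister_notifier(&my_cpu_pm_nb);
 */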
int ptrace_attach(struct task_struct *task)
{
	int retval;
	unsigned long flags;

	audit_ptrace(task);

	retval = -EPERM;
	if (same_thread_group(task, current))
		goto out;

repeat:
	/*
	 * Nasty, nasty.
	 *
	 * We want to hold both the task-lock and the
	 * tasklist_lock for writing at the same time.
	 * But that's against the rules (tasklist_lock
	 * is taken for reading by interrupts on other
	 * cpu's that may have task_lock).
	 */
	task_lock(task);
	if (!write_trylock_irqsave(&tasklist_lock, flags)) {
		task_unlock(task);
		do {
			cpu_relax();
		} while (!write_can_lock(&tasklist_lock));
		goto repeat;
	}

	if (!task->mm)
		goto bad;
	/* the same process cannot be attached many times */
	if (task->ptrace & PT_PTRACED)
		goto bad;
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	if (retval)
		goto bad;

	/* Go */
	task->ptrace |= PT_PTRACED;
	if (capable_nolog(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);
	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
bad:
	write_unlock_irqrestore(&tasklist_lock, flags);
	task_unlock(task);
out:
	return retval;
}
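/*
 * A minimal sketch of the trylock-and-back-off shape used above, with
 * illustrative lock names. The point is to never block on the outer
 * rwlock while holding the inner lock, since readers of the outer lock
 * (e.g. interrupts on other CPUs) may themselves need the inner one.
 */
static void take_both_locks_sketch(spinlock_t *inner, rwlock_t *outer,
				   unsigned long *flags)
{
repeat:
	spin_lock(inner);
	if (!write_trylock_irqsave(outer, *flags)) {
		spin_unlock(inner);
		/* Back off with no locks held so the other side can run. */
		do {
			cpu_relax();
		} while (!write_can_lock(outer));
		goto repeat;
	}
	/* Both locks held on return; release in the reverse order. */
}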
void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
{
	struct drm_psb_private *dev_priv = psb_priv(dev);
	struct ttm_fence_device *fdev = &dev_priv->fdev;
	struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
	unsigned long irq_flags;

	write_lock_irqsave(&fc->lock, irq_flags);
	psb_fence_poll(fdev, fence_class, fc->waiting_types);
	write_unlock_irqrestore(&fc->lock, irq_flags);
}
/**
 * zfcp_erp_notify - Trigger ERP action.
 * @erp_action: ERP action to continue.
 * @set_mask: ERP action status flags to set.
 */
void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
{
	struct zfcp_adapter *adapter = erp_action->adapter;
	unsigned long flags;

	write_lock_irqsave(&adapter->erp_lock, flags);
	if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
		erp_action->status |= set_mask;
		zfcp_erp_action_ready(erp_action);
	}
	write_unlock_irqrestore(&adapter->erp_lock, flags);
}
/**
 * mlx4_en_phc_adjtime - Shift the time of the hardware clock
 * @ptp: ptp clock structure
 * @delta: Desired change in nanoseconds
 *
 * Adjust the timer by resetting the timecounter structure.
 **/
static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	unsigned long flags;

	write_lock_irqsave(&mdev->clock_lock, flags);
	timecounter_adjtime(&mdev->clock, delta);
	write_unlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}
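/*
 * For reference, the timecounter helper called above is tiny: it simply
 * offsets the accumulated nanosecond count, which is why the whole
 * adjustment fits in one short write-locked section. This is a sketch of
 * the in-tree helper's core (see include/linux/timecounter.h); consult
 * the actual header for the authoritative definition.
 */
static inline void timecounter_adjtime_sketch(struct timecounter *tc,
					      s64 delta)
{
	tc->nsec += delta;
}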
int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
{
	int retval;
	unsigned long flags;
	struct zfcp_adapter *adapter = port->adapter;

	write_lock_irqsave(&adapter->erp_lock, flags);
	retval = _zfcp_erp_port_reopen(port, clear, id);
	write_unlock_irqrestore(&adapter->erp_lock, flags);

	return retval;
}
static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
						   ptp_info);
	unsigned long flags;

	write_lock_irqsave(&tstamp->lock, flags);
	timecounter_adjtime(&tstamp->clock, delta);
	write_unlock_irqrestore(&tstamp->lock, flags);

	return 0;
}
static void mlx5e_timestamp_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
						   overflow_work);
	unsigned long flags;

	write_lock_irqsave(&tstamp->lock, flags);
	timecounter_read(&tstamp->clock);
	write_unlock_irqrestore(&tstamp->lock, flags);
	schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
}
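/*
 * The worker above re-arms itself, so the only missing pieces are the
 * initial arming and the teardown. A sketch of how a driver typically
 * wires that up; the two function names here are illustrative
 * assumptions, not the driver's own.
 */
static void mlx5e_timestamp_overflow_start_sketch(struct mlx5e_tstamp *tstamp)
{
	INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
	schedule_delayed_work(&tstamp->overflow_work, 0);
}

static void mlx5e_timestamp_overflow_stop_sketch(struct mlx5e_tstamp *tstamp)
{
	/* Waits for a running instance and prevents it from re-arming. */
	cancel_delayed_work_sync(&tstamp->overflow_work);
}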
void psb_fence_error(struct drm_device *dev, uint32_t fence_class,
		     uint32_t sequence, uint32_t type, int error)
{
	struct drm_fence_manager *fm = &dev->fm;
	unsigned long irq_flags;

	BUG_ON(fence_class >= PSB_NUM_ENGINES);

	write_lock_irqsave(&fm->lock, irq_flags);
	drm_fence_handler(dev, fence_class, sequence, type, error);
	write_unlock_irqrestore(&fm->lock, irq_flags);
}
void nlm_common_free_msi_handler(int *index)
{
	unsigned long flags = 0;

	write_lock_irqsave(&nlm_msi_rw_lock, flags);
	msi_desc[*index].valid = 0;
	write_unlock_irqrestore(&nlm_msi_rw_lock, flags);

	nlm_common_disable_msi(index);
	Message("\nIndex %d Freed\n", *index);
}
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	if (mr->out.hdr.status) {
		mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
			     mr->out.hdr.status,
			     be32_to_cpu(mr->out.hdr.syndrome));
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
				&mr->mmr);
	if (err)
		pr_err("Error inserting to mr tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);
}
int EvRemoteUnRegisterEventClass(EvGroupID_t groupID, EvClassID_t classID,
				 EvAccess_t accessCode, int error)
{
	EvGroupInfo_t *EGroup;
	EvPendRem_t *Pend;
	unsigned long Flags;
	int RetVal;

	read_lock_irqsave(&EvGroupLock, Flags);
	if ((EGroup = EvGetGroupBase(groupID)) == NULL) {
		read_unlock_irqrestore(&EvGroupLock, Flags);
		return -EV_ERROR_GROUP_EXIST;
	}

	write_lock(&EGroup->EgiLock);
	read_unlock(&EvGroupLock);

	/* Find the pending control struct for this ID.
	 *
	 * If Pend is NULL then this is the master reporting a new event
	 * class.
	 */
	if ((Pend = EvFindPendEntry(EGroup, EV_REM_UNREGISTER_EVENT_CLASS,
				    accessCode, classID, 0, 0, 0,
				    NULL)) != NULL) {
		/* Fill in the return values. */
		Pend->PrRetInfo.EpInfo[0] = error;

		/* Wake up the requester. */
		wake_up_interruptible(&Pend->PrWaitQ);
		write_unlock_irqrestore(&EGroup->EgiLock, Flags);
		return EV_NOERR;
	}

	RetVal = EvInternalUnRegisterEventClass(EGroup, classID, accessCode);

	write_unlock_irqrestore(&EGroup->EgiLock, Flags);
	return RetVal;
}
/*
 * Set maximum number of queued events for a userID.
 */
int EvSetQLimit(EvUserID_t userID, unsigned int newQLimit)
{
	EvKernelInfo_t *TmpUser;
	unsigned long Flags;

	write_lock_irqsave(&EvUsersLock, Flags);

	TmpUser = EvUsersHead;
	while (TmpUser) {
		if (TmpUser->EkiID == userID) {
			TmpUser->EkiQLimit = newQLimit;
			write_unlock_irqrestore(&EvUsersLock, Flags);
			return EV_NOERR;
		}
		TmpUser = TmpUser->EkiNext;
	}

	write_unlock_irqrestore(&EvUsersLock, Flags);
	return -EV_ERROR_USER_EXISTS;
}
void mlx5e_ptp_overflow_check(struct mlx5e_priv *priv)
{
	bool timeout = time_is_before_jiffies(priv->tstamp.last_overflow_check +
					      priv->tstamp.overflow_period);
	unsigned long flags;

	if (timeout) {
		write_lock_irqsave(&priv->tstamp.lock, flags);
		timecounter_read(&priv->tstamp.clock);
		write_unlock_irqrestore(&priv->tstamp.lock, flags);
		priv->tstamp.last_overflow_check = jiffies;
	}
}
// __stp_tf_map_initialize(): Initialize the free list. Grabs the
// lock.
static void
__stp_tf_map_initialize(void)
{
	int i;
	struct hlist_head *head = &__stp_tf_map_free_list[0];
	unsigned long flags;

	write_lock_irqsave(&__stp_tf_map_lock, flags);
	for (i = 0; i < TASK_FINDER_MAP_ENTRY_ITEMS; i++) {
		hlist_add_head(&__stp_tf_map_free_list_items[i].hlist, head);
	}
	write_unlock_irqrestore(&__stp_tf_map_lock, flags);
}
/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	list_add_tail(&ccp->entry, &ccp_units);
	if (!ccp_rr)
		/* We already have the list lock (we're first) so this
		 * pointer can't change on us. Set its initial value.
		 */
		ccp_rr = ccp;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}
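/*
 * A hedged sketch of the consuming side of the round-robin pointer: pick
 * the current unit and advance ccp_rr under the same lock. The function
 * name and exact selection policy here are assumptions, not the driver's
 * actual code.
 */
static struct ccp_device *ccp_get_device_sketch(void)
{
	struct ccp_device *dp = NULL;
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	if (ccp_rr) {
		dp = ccp_rr;
		/* Advance, wrapping back to the head at the list tail. */
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units,
						  struct ccp_device, entry);
		else
			ccp_rr = list_next_entry(&ccp_rr, entry);
	}
	write_unlock_irqrestore(&ccp_unit_lock, flags);

	return dp;
}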
void rxe_pool_cleanup(struct rxe_pool *pool)
{
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	pool->state = RXE_POOL_STATE_INVALID;
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool_name(pool));
	write_unlock_irqrestore(&pool->pool_lock, flags);

	rxe_pool_put(pool);
}
void insert_sysinfo_snapshot(struct sysinfo_snapshot *target)
{
	unsigned long flags;

	write_lock_irqsave(&sysinfo_snapshot_lock, flags);
	if (snapshot_head == NULL)
		snapshot_head = target;
	else
		snapshot_tail->next = target;
	snapshot_tail = target;
	write_unlock_irqrestore(&sysinfo_snapshot_lock, flags);
}
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
{
	bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
					      mdev->overflow_period);
	unsigned long flags;

	if (timeout) {
		write_lock_irqsave(&mdev->clock_lock, flags);
		timecounter_read(&mdev->clock);
		write_unlock_irqrestore(&mdev->clock_lock, flags);
		mdev->last_overflow_check = jiffies;
	}
}
int EvUnSubscribeGroupEvents(EvUserID_t userID, EvGroupID_t groupID)
{
	EvKernelInfo_t *EventUser;
	EvGroupInfo_t *EGroup;
	unsigned long Flags;

	read_lock_irqsave(&EvUsersLock, Flags);
	if ((EventUser = EvCheckUser(userID)) == NULL) {
		read_unlock_irqrestore(&EvUsersLock, Flags);
		return -EV_ERROR_USER_EXISTS;
	}

	write_lock(&EvGroupLock);
	read_unlock(&EvUsersLock);

	if ((EGroup = EvGetGroupBase(groupID)) == NULL) {
		write_unlock_irqrestore(&EvGroupLock, Flags);
		return -EV_ERROR_GROUP_EXIST;
	}

	/* Check that there are no current control processes. */
	if (EGroup->EgiGroupDest.EdID != userID) {
		write_unlock_irqrestore(&EvGroupLock, Flags);
		return -EV_ERROR_GROUP_ACCESS;
	}

	EGroup->EgiGroupDest.EdID = 0;
	EGroup->EgiGroupDest.EdUinfo = NULL;
	EGroup->EgiGroupDest.EdCB = NULL;
	EGroup->EgiGroupDest.EdKinfo = NULL;

	EGroup->EgiUseCount--;

	write_unlock_irqrestore(&EvGroupLock, Flags);
	return EV_NOERR;
}
static int mlx5e_ptp_settime(struct ptp_clock_info *ptp,
			     const struct timespec64 *ts)
{
	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
						   ptp_info);
	u64 ns = timespec64_to_ns(ts);
	unsigned long flags;

	write_lock_irqsave(&tstamp->lock, flags);
	timecounter_init(&tstamp->clock, &tstamp->cycles, ns);
	write_unlock_irqrestore(&tstamp->lock, flags);

	return 0;
}
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	panic("Disabled");
#if 0 // AKAROS_PORT
	struct mlx4_dev *dev = mdev->dev;
	unsigned long flags;
	uint64_t ns, zero = 0;

	rwlock_init(&mdev->clock_lock);

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
	/* Using shift to make calculation more accurate. Since current HW
	 * clock frequency is 427 MHz, and cycles are given using a 48 bits
	 * register, the biggest shift when calculating using u64, is 14
	 * (max_cycles * multiplier < 2^64)
	 */
	mdev->cycles.shift = 14;
	mdev->cycles.mult =
		clocksource_khz2mult(1000 * dev->caps.hca_core_clock,
				     mdev->cycles.shift);
	mdev->nominal_c_mult = mdev->cycles.mult;

	write_lock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles, epoch_nsec());
	write_unlock_irqrestore(&mdev->clock_lock, flags);

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least once every wrap around.
	 */
	ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
	do_div(ns, NSEC_PER_SEC / 2 / HZ);
	mdev->overflow_period = ns;

	/* Configure the PHC */
	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");

	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
					     &mdev->pdev->dev);
	if (IS_ERR(mdev->ptp_clock)) {
		mdev->ptp_clock = NULL;
		mlx4_err(mdev, "ptp_clock_register failed\n");
	} else {
		mlx4_info(mdev, "registered PHC clock\n");
	}
#endif
}
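/*
 * Worked check of the shift bound in the comment above, as a standalone
 * userspace sketch (not driver code). With a 48-bit cycle register the
 * product max_cycles * mult must stay below 2^64, so mult must fit in
 * 16 bits; shift 14 is the largest value that keeps it there for a
 * 427 MHz clock.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t khz = 1000 * 427;	/* 427 MHz core clock, in kHz */
	unsigned int shift = 14;
	/* clocksource_khz2mult(khz, shift) is roughly (10^6 << shift) / khz */
	uint64_t mult = (1000000ULL << shift) / khz;	/* ~38370 */

	/* 2^48 * 38370 is about 2^63.2, still below 2^64; shift 15 would
	 * double mult and overflow the 64-bit intermediate. */
	printf("mult = %llu, fits in 16 bits: %s\n",
	       (unsigned long long)mult,
	       mult < (1ULL << 16) ? "yes" : "no");
	return 0;
}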
/*!
******************************************************************************

 @Function	LinuxEventObjectAdd

 @Description	Linux wait object addition

 @Input		hOSEventObjectList : Event object list handle
 @Output	phOSEventObject : Pointer to the event object handle

 @Return	PVRSRV_ERROR : Error code

******************************************************************************/
PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList,
				 IMG_HANDLE *phOSEventObject)
{
	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList =
		(PVRSRV_LINUX_EVENT_OBJECT_LIST *)hOSEventObjectList;
	IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
	PVRSRV_PER_PROCESS_DATA *psPerProc;
	unsigned long ulLockFlags;

	psPerProc = PVRSRVPerProcessData(ui32PID);
	if (psPerProc == IMG_NULL) {
		PVR_DPF((PVR_DBG_ERROR,
			 "LinuxEventObjectAdd: Couldn't find per-process data"));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	/* allocate completion variable */
	if (OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
		       sizeof(PVRSRV_LINUX_EVENT_OBJECT),
		       (IMG_VOID **)&psLinuxEventObject, IMG_NULL,
		       "Linux Event Object") != PVRSRV_OK) {
		PVR_DPF((PVR_DBG_ERROR,
			 "LinuxEventObjectAdd: failed to allocate memory"));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	INIT_LIST_HEAD(&psLinuxEventObject->sList);

	atomic_set(&psLinuxEventObject->sTimeStamp, 0);
	psLinuxEventObject->ui32TimeStampPrevious = 0;

#if defined(DEBUG)
	psLinuxEventObject->ui32Stats = 0;
#endif

	init_waitqueue_head(&psLinuxEventObject->sWait);

	psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;

	psLinuxEventObject->hResItem =
		ResManRegisterRes(psPerProc->hResManContext,
				  RESMAN_TYPE_EVENT_OBJECT, psLinuxEventObject,
				  0, &LinuxEventObjectDeleteCallback);

	write_lock_irqsave(&psLinuxEventObjectList->sLock, ulLockFlags);
	list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
	write_unlock_irqrestore(&psLinuxEventObjectList->sLock, ulLockFlags);

	*phOSEventObject = psLinuxEventObject;

	return PVRSRV_OK;
}
void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
{
	unsigned long flags;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_port *port = zfcp_sdev->port;
	struct zfcp_adapter *adapter = port->adapter;
	int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;

	write_lock_irqsave(&adapter->erp_lock, flags);
	_zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF);
	write_unlock_irqrestore(&adapter->erp_lock, flags);

	zfcp_erp_wait(adapter);
}
int mipv6_bcache_put(struct mipv6_bcache_entry *entry)
{
	unsigned long flags;

	write_lock_irqsave(&bcache->lock, flags);

	DEBUG((DBG_INFO, "adding entry: %x", entry));

	if (mipv6_tunnel_add(&entry->coa, &entry->our_addr, 0)) {
		DEBUG((DBG_INFO, "mipv6_bcache_add: no free tunnel devices!"));
		bcache_proxy_nd_rem(entry);
		mipv6_bcache_entry_free(entry);
		write_unlock_irqrestore(&bcache->lock, flags);
		return -1;
	}

	if (mipv6_tunnel_route_add(&entry->home_addr, &entry->coa,
				   &entry->our_addr)) {
		DEBUG((DBG_INFO,
		       "mipv6_bcache_add: invalid route to home address!"));
		mipv6_tunnel_del(&entry->coa, &entry->our_addr);
		bcache_proxy_nd_rem(entry);
		mipv6_bcache_entry_free(entry);
		write_unlock_irqrestore(&bcache->lock, flags);
		return -1;
	}

	if ((hashlist_add(bcache->entries, &entry->home_addr,
			  entry->callback_time, entry)) < 0) {
		mipv6_tunnel_route_del(&entry->home_addr, &entry->coa,
				       &entry->our_addr);
		mipv6_tunnel_del(&entry->coa, &entry->our_addr);
		bcache_proxy_nd_rem(entry);
		mipv6_bcache_entry_free(entry);
		DEBUG((DBG_ERROR, "Hash add failed"));
		write_unlock_irqrestore(&bcache->lock, flags);
		return -1;
	}

	set_timer();

	write_unlock_irqrestore(&bcache->lock, flags);
	return 0;
}
// Add the map info to the map hash table. Takes a write lock on
// __stp_tf_map_lock.
static int
__stp_tf_add_map(struct task_struct *tsk, long syscall_no, unsigned long arg0,
		 unsigned long arg1, unsigned long arg2)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct __stp_tf_map_entry *entry;
	unsigned long flags;

	write_lock_irqsave(&__stp_tf_map_lock, flags);
	head = &__stp_tf_map_table[__stp_tf_map_hash(tsk)];
	hlist_for_each_entry(entry, node, head, hlist) {
		// If we find an existing entry, just increment the
		// usage count.
		if (tsk->pid == entry->pid) {
			entry->usage++;
			write_unlock_irqrestore(&__stp_tf_map_lock, flags);
			return 0;
		}
	}

	// Get an element from the free list.
	entry = __stp_tf_map_get_free_entry();
	if (!entry) {
		write_unlock_irqrestore(&__stp_tf_map_lock, flags);
		return -ENOMEM;
	}
	entry->usage = 1;
	entry->pid = tsk->pid;
	entry->syscall_no = syscall_no;
	entry->arg0 = arg0;
	entry->arg1 = arg1;
	entry->arg2 = arg2;
	hlist_add_head(&entry->hlist, head);
	write_unlock_irqrestore(&__stp_tf_map_lock, flags);
	return 0;
}
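// Hedged sketch of the matching removal path: decrement the usage count
// and return the entry to the free list once it reaches zero. The
// function name and the put-back helper are illustrative, not the
// runtime's actual ones.
static void
__stp_tf_remove_map_sketch(struct task_struct *tsk)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct __stp_tf_map_entry *entry;
	unsigned long flags;

	write_lock_irqsave(&__stp_tf_map_lock, flags);
	head = &__stp_tf_map_table[__stp_tf_map_hash(tsk)];
	hlist_for_each_entry(entry, node, head, hlist) {
		if (tsk->pid == entry->pid) {
			if (--entry->usage == 0) {
				hlist_del(&entry->hlist);
				// Hypothetical counterpart of
				// __stp_tf_map_get_free_entry().
				__stp_tf_map_put_free_entry(entry);
			}
			break;
		}
	}
	write_unlock_irqrestore(&__stp_tf_map_lock, flags);
}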
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	struct mlx4_dev *dev = mdev->dev;
	unsigned long flags;
	u64 ns;

	/* mlx4_en_init_timestamp is called for each netdev.
	 * mdev->ptp_clock is common for all ports, skip initialization if
	 * was done for other port.
	 */
	if (mdev->ptp_clock)
		return;

	rwlock_init(&mdev->clock_lock);

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
	mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
	mdev->cycles.mult =
		clocksource_khz2mult(1000 * dev->caps.hca_core_clock,
				     mdev->cycles.shift);
	mdev->nominal_c_mult = mdev->cycles.mult;

	write_lock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles,
			 ktime_to_ns(ktime_get_real()));
	write_unlock_irqrestore(&mdev->clock_lock, flags);

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least once every wrap around.
	 */
	ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
	do_div(ns, NSEC_PER_SEC / 2 / HZ);
	mdev->overflow_period = ns;

	/* Configure the PHC */
	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");

	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
					     &mdev->pdev->dev);
	if (IS_ERR(mdev->ptp_clock)) {
		mdev->ptp_clock = NULL;
		mlx4_err(mdev, "ptp_clock_register failed\n");
	} else {
		mlx4_info(mdev, "registered PHC clock\n");
	}
}
/*
 * serial_open - open serial device
 * @tty: tty device
 * @filp: file structure
 *
 * Called to open serial device.
 */
static int serial_open(struct tty_struct *tty, struct file *filp)
{
	unsigned long flags;
	int n = 0, rc = 0;
	struct serproto_dev *device = NULL;

	dbg_oc(3, "tty #%p file #%p", tty, filp);

	if (NULL == tty ||
	    0 > (n = MINOR(tty->device) - tty->driver.minor_start) ||
	    n >= serproto_devices ||
	    NULL == (device = serproto_device_array[n])) {
		dbg_oc(1, "FAIL ENODEV");
		return -ENODEV;
	}

	MOD_INC_USE_COUNT;
	dbg_init(1, "OPEN uc=%d", GET_USE_COUNT(THIS_MODULE));

	write_lock_irqsave(&device->rwlock, flags);

	if (1 == ++device->opencnt) {
		// First open
		tty->driver_data = device;
		device->tty = tty;
		/* Force low_latency on so that our tty_push actually forces
		 * the data through; otherwise it is scheduled, and with high
		 * data rates (like with OHCI) data can get lost.
		 */
		tty->low_latency = 1;
	} else if (tty->driver_data != device || device->tty != tty) {
		// Second or later open, different tty/device combo
		rc = -EBUSY;
	}
	// XXX Should extract info from somewhere to see if receive is OK

	write_unlock_irqrestore(&device->rwlock, flags);

	if (0 != rc) {
		if (-EBUSY == rc) {
			dbg_oc(1, "2nd, conflict: old dev #%p new #%p, old tty #%p new #%p",
			       tty->driver_data, device, device->tty, tty);
		}
		MOD_DEC_USE_COUNT;
		dbg_init(0, "OPEN rc=%d uc=%d", rc, GET_USE_COUNT(THIS_MODULE));
	}
	dbg_oc(3, "->%d n=%d", rc, n);
	return rc;
}