/*
 * zfcp_erp_wakeup - wake waiters once error recovery has drained
 * @adapter: adapter whose ERP queues are inspected
 *
 * When both the ready and the running ERP queues are empty, clear the
 * ERP-pending status bit and wake anyone sleeping on erp_done_wqh.
 */
static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
{
	unsigned long flags;
	int erp_idle;

	read_lock_irqsave(&zfcp_data.config_lock, flags);
	read_lock(&adapter->erp_lock);
	erp_idle = list_empty(&adapter->erp_ready_head) &&
		   list_empty(&adapter->erp_running_head);
	if (erp_idle) {
		atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
				  &adapter->status);
		wake_up(&adapter->erp_done_wqh);
	}
	read_unlock(&adapter->erp_lock);
	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
}
/**
 * adf_interface_modelist - get interface's modelist
 *
 * @intf: the interface
 * @modelist: storage for the modelist (optional)
 * @n_modes: length of @modelist
 *
 * When @modelist is non-NULL, at most @n_modes entries are copied
 * into it.
 *
 * Returns the length of the interface's full modelist, which may be
 * larger than @n_modes.
 */
size_t adf_interface_modelist(struct adf_interface *intf,
                              struct drm_mode_modeinfo *modelist, size_t n_modes)
{
    unsigned long flags;
    size_t total;

    read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
    total = intf->n_modes;
    if (modelist) {
        size_t to_copy = min(n_modes, intf->n_modes);

        memcpy(modelist, intf->modelist, to_copy * sizeof(modelist[0]));
    }
    read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);

    return total;
}
/* Example #3 */
/*
 * EvSubscribeGroupEvents - install a kernel callback as the single
 * control point for an event group.
 *
 * @userID:     previously registered kernel user (validated below)
 * @groupID:    group to take control of
 * @accessCode: access code checked against the group's code
 * @kernelCB:   callback invoked for events delivered to this group
 *
 * Returns EV_NOERR on success or a negative EV_ERROR_* code.
 *
 * Locking: EvUsersLock is taken with irqsave; the saved Flags are later
 * used to restore interrupts when EvGroupLock is released.  The
 * hand-over (write_lock on EvGroupLock, then read_unlock of EvUsersLock)
 * keeps interrupts disabled across both critical sections.
 * NOTE(review): this relies on both locks sharing the same saved irq
 * state — confirm this pattern matches the other Ev* lock users.
 */
int
EvSubscribeGroupEvents(EvUserID_t userID,
                       EvGroupID_t groupID, EvAccess_t accessCode,
                       int (*kernelCB)(EvUserID_t, int, EvGroupID_t,
                                       EvGroupID_t, EvClassID_t, EvEventID_t,
                                       int, int, int, int, int, void *))
{
    EvKernelInfo_t *EventUser;
    EvGroupInfo_t *EGroup;
    unsigned long Flags;

    /* Validate the user while holding the users lock. */
    read_lock_irqsave(&EvUsersLock, Flags);
    if ((EventUser = EvCheckUser(userID)) == NULL) {
        read_unlock_irqrestore(&EvUsersLock, Flags);
        return -EV_ERROR_USER_EXISTS;
    }

    /* Lock hand-over: irqs stay disabled, Flags travels to EvGroupLock. */
    write_lock(&EvGroupLock);
    read_unlock(&EvUsersLock);

    if ((EGroup = EvGetGroupBase(groupID)) == NULL) {
        write_unlock_irqrestore(&EvGroupLock, Flags);
        return -EV_ERROR_GROUP_EXIST;
    }

    /* Check the access code for the group. */
    if (EvCheckAccessCode(EGroup->EgiAccessCode, accessCode)) {
        write_unlock_irqrestore(&EvGroupLock, Flags);
        return -EV_ERROR_GROUP_ACCESS;
    }

    /* Check that there are no current control processes. */
    if (EGroup->EgiGroupDest.EdID != 0) {
        write_unlock_irqrestore(&EvGroupLock, Flags);
        return -EV_ERROR_GROUP_BUSY;
    }

    /* Claim the group's destination slot for this kernel user. */
    EGroup->EgiGroupDest.EdPri = 0;		/* Not used. */
    EGroup->EgiGroupDest.EdID = userID;
    EGroup->EgiGroupDest.EdUinfo = NULL;
    EGroup->EgiGroupDest.EdCB = kernelCB;
    EGroup->EgiGroupDest.EdKinfo = EventUser;

    EGroup->EgiUseCount++;

    write_unlock_irqrestore(&EvGroupLock, Flags);
    return EV_NOERR;
}
/* Example #4 */
/*
 * ib_get_cached_lmc - read the cached LMC value for @port_num under
 * the device cache lock.
 *
 * Returns 0 on success with *lmc filled in, or -EINVAL when
 * @port_num is not a valid port of @device.
 */
int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return 0;
}
/**
 * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
 * @port: pointer to port to search for unit
 * @fcp_lun: FCP LUN to search for
 *
 * Takes a device reference on the unit it returns.
 *
 * Returns: pointer to zfcp_unit or NULL
 */
struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
{
	struct zfcp_unit *unit, *found = NULL;
	unsigned long flags;

	read_lock_irqsave(&port->unit_list_lock, flags);
	list_for_each_entry(unit, &port->unit_list, list) {
		if (unit->fcp_lun != fcp_lun)
			continue;
		/* Taking the reference can fail while the unit is going away. */
		if (get_device(&unit->dev))
			found = unit;
		break;
	}
	read_unlock_irqrestore(&port->unit_list_lock, flags);
	return found;
}
/* Example #6 */
/*
 * ib_get_cached_lmc - fetch the cached LMC for @port_num (legacy
 * start_port/end_port range check) under the cache read lock.
 *
 * Returns 0 with *lmc set, or -EINVAL for a port outside the
 * device's range.
 */
int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return 0;
}
/*
 * mt_sched_debug_show - seq_file show: print a header and then one
 * line per task (via print_task) while holding the tasklist read lock.
 *
 * NOTE(review): this snippet appears truncated — the matching
 * read_unlock_irqrestore(&tasklist_lock, flags), return statement and
 * closing brace are not visible here; confirm against the full source.
 */
static int mt_sched_debug_show(struct seq_file *m, void *v)
{
    struct task_struct *g, *p;
    unsigned long flags;
    SEQ_printf(m, "=== mt Scheduler Profiling ===\n");
    SEQ_printf(m,
	    "\nrunnable tasks:\n"
	    "            task   PID   switches  prio"
	    "     exec-runtime         sum-exec        sum-sleep\n"
	    "------------------------------------------------------"
	    "----------------------------------------------------\n");
    read_lock_irqsave(&tasklist_lock, flags);

    /* Walk every thread of every process under the tasklist lock. */
    do_each_thread(g, p) {
	print_task(m, p);
    } while_each_thread(g, p);
/* Example #8 */
/*
 * ksb_debug_show - seq_file show: dump the bridge debug ring buffer,
 * prefixing the most recently written slot with "-->".
 */
static ssize_t ksb_debug_show(struct seq_file *s, void *unused)
{
	struct ks_bridge	*ksb = s->private;
	unsigned long		flags;
	int			idx;

	read_lock_irqsave(&ksb->dbg_lock, flags);
	for (idx = 0; idx < DBG_MAX_MSG; idx++) {
		/* Mark the slot written last (dbg_idx points past it). */
		if (idx == (ksb->dbg_idx - 1))
			seq_printf(s, "-->%s\n", ksb->dbgbuf[idx]);
		else
			seq_printf(s, "%s\n", ksb->dbgbuf[idx]);
	}
	read_unlock_irqrestore(&ksb->dbg_lock, flags);

	return 0;
}
/* Example #9 */
/*	Care must be taken to only invoke hp_sdc_spin_ibf when 
 *	absolutely needed, or in rarely invoked subroutines.  
 *	Not only does it waste CPU cycles, it also wastes bus cycles. 
 */
static inline void hp_sdc_spin_ibf(void) {
	unsigned long flags;
	rwlock_t *lock;

	lock = &hp_sdc.ibf_lock;

	/* Fast path: take only the read side; if the input-buffer-full
	 * flag is already clear there is nothing to wait for. */
	read_lock_irqsave(lock, flags);
	if (!hp_sdc.ibf) {
		read_unlock_irqrestore(lock, flags);
		return;
	}
	/* Upgrade to the write lock.  NOTE(review): the lock is dropped
	 * between read_unlock and write_lock, so hp_sdc.ibf can change in
	 * that window; the busy-wait below re-reads the hardware status
	 * register, which tolerates that race.  Interrupts stay disabled
	 * throughout (flags saved above, restored below). */
	read_unlock(lock);
	write_lock(lock);
	/* Spin until the controller drains its input buffer. */
	while (sdc_readb(hp_sdc.status_io) & HP_SDC_STATUS_IBF) {};
	hp_sdc.ibf = 0;
	write_unlock_irqrestore(lock, flags);
}
/* Example #10 */
/**
 * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
 * @adapter: pointer to adapter to search for port
 * @wwpn: wwpn to search for
 *
 * Takes a device reference on the port it returns.
 *
 * Returns: pointer to zfcp_port or NULL
 */
struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
					u64 wwpn)
{
	struct zfcp_port *port, *found = NULL;
	unsigned long flags;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list) {
		if (port->wwpn != wwpn)
			continue;
		/* Taking the reference can fail while the port is going away. */
		if (get_device(&port->dev))
			found = port;
		break;
	}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
	return found;
}
/* Example #11 */
/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *buf, *match = NULL;
	unsigned long flags;

	/* Walk the pool under the read lock, stop at the first hit. */
	read_lock_irqsave(&device_info->lock, flags);
	list_for_each_entry(buf, &device_info->safe_buffers, node) {
		if (buf->safe_dma_addr == safe_dma_addr) {
			match = buf;
			break;
		}
	}
	read_unlock_irqrestore(&device_info->lock, flags);

	return match;
}
/* Example #12 */
/*
 * mlx4_en_fill_hwtstamps - convert a raw cycle-counter timestamp into
 * skb_shared_hwtstamps.
 *
 * NOTE(review): disabled in this port — the function panics
 * unconditionally and the original implementation below is compiled
 * out with #if 0.
 */
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
			    struct skb_shared_hwtstamps *hwts,
			    uint64_t timestamp)
{
	panic("Disabled");
#if 0 // AKAROS_PORT
	unsigned long flags;
	uint64_t nsec;

	read_lock_irqsave(&mdev->clock_lock, flags);
	nsec = timecounter_cyc2time(&mdev->clock, timestamp);
	read_unlock_irqrestore(&mdev->clock_lock, flags);

	memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
	hwts->hwtstamp = ns_to_ktime(nsec);
#endif
}
/* Example #13 */
/*
 * ib_get_cached_subnet_prefix - read the cached subnet prefix of
 * @port_num under the device cache lock.
 *
 * Returns 0 with *sn_pfx filled in, or -EINVAL for an invalid port.
 */
int ib_get_cached_subnet_prefix(struct ib_device *device,
				u8                port_num,
				u64              *sn_pfx)
{
	unsigned long flags;
	int idx;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	idx = port_num - rdma_start_port(device);
	*sn_pfx = device->cache.ports[idx].subnet_prefix;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return 0;
}
/* Example #14 */
/**
 * mipv6_bcache_exists - check if entry exists
 * @home_addr: address to check
 *
 * Determines if a binding exists for @home_addr.  Returns type of the
 * entry or negative if entry does not exist.
 **/
int mipv6_bcache_exists(struct in6_addr *home_addr)
{
	unsigned long flags;
	struct mipv6_bcache_entry *entry;

	DEBUG_FUNC();

	if (home_addr == NULL) return -1;

	/* Look the address up in the binding cache hash list. */
	read_lock_irqsave(&bcache->lock, flags);
	entry = (struct mipv6_bcache_entry *)
		hashlist_get(bcache->entries, home_addr);
	read_unlock_irqrestore(&bcache->lock, flags);

	/* NOTE(review): entry->type is dereferenced after bcache->lock is
	 * released, so the entry could in principle be freed in between.
	 * Confirm cache entries are reclaimed under some other guarantee. */
	if(entry == NULL) return -1;

	return entry->type;
}
/*
 * ehci_hsic_msm_ctrl_events_show - seq_file show: dump the HSIC
 * control-event ring buffer from oldest to newest, skipping empty
 * slots.
 */
static int ehci_hsic_msm_ctrl_events_show(struct seq_file *s, void *unused)
{
	unsigned long	flags;
	unsigned	idx;

	read_lock_irqsave(&dbg_hsic_ctrl.lck, flags);

	/* Start one past the write index and walk back around to it. */
	idx = dbg_hsic_ctrl.idx;
	dbg_inc(&idx);
	while (idx != dbg_hsic_ctrl.idx) {
		if (strnlen(dbg_hsic_ctrl.buf[idx], DBG_MSG_LEN))
			seq_printf(s, "%s\n", dbg_hsic_ctrl.buf[idx]);
		dbg_inc(&idx);
	}

	read_unlock_irqrestore(&dbg_hsic_ctrl.lck, flags);

	return 0;
}
/* Example #16 */
/*
 * pm_send_to_host - build a power-management message (header plus
 * payload) and send it to the host over the SCIF endpoint.
 *
 * @opcode: message opcode written into the header
 * @msg:    payload bytes (may be NULL when @len is 0)
 * @len:    payload length in bytes
 *
 * Returns the scif_send() result (>= 0) on success, a negative errno
 * on early failure, or -1 when the endpoint is already in use.
 */
int
pm_send_to_host(PM_MESSAGE opcode, void *msg, size_t len)
{
//	FUNCTION_ENTRY;
	int err = 0;
	size_t psize = sizeof(pm_msg_header) + len;
	char *payload;
	unsigned long flags;

	/* Refuse to send unless the control connection is up. */
	if (pm_scif->con_state != PM_CONNECTED) {
		err = -EINVAL;
		goto error;
	}

	/* GFP_ATOMIC: may be called from a non-sleeping context. */
	if (!(payload = kmalloc(psize, GFP_ATOMIC))) {
		err = -ENOMEM;
		goto error;
	}
	read_lock_irqsave(&pmscif_send,flags);

	/* Single-user gate on the endpoint; back off if already taken.
	 * NOTE(review): this path returns -1 rather than -EBUSY — kept
	 * as-is since callers may be testing for -1 specifically. */
	if (atomic_xchg(&epinuse,1) != 0) {
		read_unlock_irqrestore(&pmscif_send,flags);
		kfree(payload);
		return -1;
	}

	/* Fill in header, then copy the payload right behind it. */
	((pm_msg_header*)payload)->opcode = opcode;
	((pm_msg_header*)payload)->len = len;
	if (len)
		memcpy((char*)payload + sizeof(pm_msg_header), msg, len);

	//0 for non blocking
	if ((err = scif_send(pm_scif->ep, payload, psize, 0)) < 0) {
		PM_DB("scif_recv failed\n");
	}
	atomic_set(&epinuse,0);
	//for (i = 0; i < psize; i++)
	//	printk(KERN_ALERT" buff: %X\n", payload[i]);
	read_unlock_irqrestore(&pmscif_send,flags);
	kfree(payload);
//	FUNCTION_EXIT;
error:
	return err;
}
/* Example #17 */
/*
 * This version of gettimeofday has near microsecond resolution.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long sec, usec;
	unsigned long flags;

	/* Snapshot xtime under the read lock. */
	read_lock_irqsave(&xtime_lock, flags);
	sec = xtime.tv_sec;
	usec = xtime.tv_nsec / 1000;
	read_unlock_irqrestore(&xtime_lock, flags);

	/* Normalize: carry whole seconds out of the microsecond part. */
	sec += usec / 1000000;
	usec %= 1000000;

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
/* Example #18 */
// Get map_entry if the map is present in the map hash table.
// Returns NULL if not present. Takes a read lock on __stp_tf_map_lock.
static struct __stp_tf_map_entry *
__stp_tf_get_map_entry(struct task_struct *tsk)
{
	struct __stp_tf_map_entry *entry, *found = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	unsigned long flags;

	read_lock_irqsave(&__stp_tf_map_lock, flags);
	// Only the bucket selected by the task's hash needs scanning.
	head = &__stp_tf_map_table[__stp_tf_map_hash(tsk)];
	hlist_for_each_entry(entry, node, head, hlist) {
		if (entry->pid == tsk->pid) {
			found = entry;
			break;
		}
	}
	read_unlock_irqrestore(&__stp_tf_map_lock, flags);
	return found;
}
/* Example #19 */
/* This can be called from any context since it saves CPU flags */
static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc, struct mpoa_client *mpc)
{
    eg_cache_entry *entry, *found = NULL;
    unsigned long flags;

    read_lock_irqsave(&mpc->egress_lock, flags);
    for (entry = mpc->eg_cache; entry != NULL; entry = entry->next) {
        if (entry->shortcut == vcc) {
            /* Hand the caller a reference before dropping the lock. */
            atomic_inc(&entry->use);
            found = entry;
            break;
        }
    }
    read_unlock_irqrestore(&mpc->egress_lock, flags);

    return found;
}
/* Example #20 */
/* This can be called from any context since it saves CPU flags */
static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
{
    eg_cache_entry *entry, *found = NULL;
    unsigned long flags;

    read_lock_irqsave(&mpc->egress_lock, flags);
    for (entry = mpc->eg_cache; entry != NULL; entry = entry->next) {
        if (entry->ctrl_info.tag == tag) {
            /* Hand the caller a reference before dropping the lock. */
            atomic_inc(&entry->use);
            found = entry;
            break;
        }
    }
    read_unlock_irqrestore(&mpc->egress_lock, flags);

    return found;
}
/* Example #21 */
/*
 * ib_find_cached_pkey - find the table index of @pkey in the cached
 * P_Key table of @port_num.
 *
 * A table slot matches when its low 15 bits equal those of @pkey.
 * Slots with the high bit (0x8000, full membership) set are preferred;
 * a limited-membership match is remembered in partial_ix and used only
 * if no full-membership slot matches.
 *
 * Returns 0 with *index set, -EINVAL for a port outside the device's
 * range, or -ENOENT when no slot matches.
 */
int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	*index = -1;

	read_lock_irqsave(&device->cache.lock, flags);

	/* Cache may not be populated yet; treat that as "not found". */
	if (!device->cache.pkey_cache)
		goto out;

	cache = device->cache.pkey_cache[port_num - start_port(device)];
	if (!cache)
		goto out;

	/* Compare ignoring the membership bit (top bit of the P_Key). */
	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	/* No full-membership slot: fall back to a limited-membership one. */
	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}
out:
	read_unlock_irqrestore(&device->cache.lock, flags);
	return ret;
}
/* debugfs: cpu monitor (for all) — one CSV row per snapshot, with the
 * per-cpu counters summed over every cpu in the snapshot. */
static int debugfs_cpu_all_show(struct seq_file *m, void *v){
	struct sysinfo_snapshot *snap;
	unsigned long flags;
	int cpu;

	read_lock_irqsave(&sysinfo_snapshot_lock, flags);
	seq_printf(m, "\"epoch time\",\"user\",\"nice\",\"system\",\"idle\",\"iowait\",\"irq\",\"soft irq\"\n");
	for(snap = snapshot_head; snap != NULL; snap = snap->next){
		u64 user = 0, nice = 0, system = 0, idle = 0;
		u64 iowait = 0, irq = 0, softirq = 0;

		/* Aggregate the per-cpu counters for this snapshot. */
		for(cpu = 0; cpu < snap->num_cpu; cpu++){
			user    += snap->cpuinfo_list[cpu].user;
			nice    += snap->cpuinfo_list[cpu].nice;
			system  += snap->cpuinfo_list[cpu].system;
			idle    += snap->cpuinfo_list[cpu].idle;
			iowait  += snap->cpuinfo_list[cpu].iowait;
			irq     += snap->cpuinfo_list[cpu].irq;
			softirq += snap->cpuinfo_list[cpu].softirq;
		}

		seq_printf(m, "%lld,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n",
			   snap->epoch_time, user, nice, system, idle,
			   iowait, irq, softirq);
	}

	read_unlock_irqrestore(&sysinfo_snapshot_lock,flags);

	return 0;
}
/* debugfs: memory monitor — one CSV row of RAM/swap figures (converted
 * with K()) per snapshot in the list. */
static int debugfs_memory_show(struct seq_file *m, void *v){
	struct sysinfo_snapshot *snap;
	unsigned long flags;

	read_lock_irqsave(&sysinfo_snapshot_lock, flags);
	seq_printf(m, "\"epoch time\",\"Total RAM\",\"Free RAM\",\"Shared RAM\",\"Cached RAM\",\"Buffered RAM\",\"Total swap\",\"Free swap\"\n");
	for(snap = snapshot_head; snap != NULL; snap = snap->next)
		seq_printf(m, "%lld,%lu,%lu,%lu,%lu,%lu,%lu,%lu\n",
			   snap->epoch_time,
			   K(snap->total_ram), K(snap->free_ram),
			   K(snap->shared_ram), K(snap->cached_ram),
			   K(snap->buffer_ram), K(snap->total_swap),
			   K(snap->free_swap));
	read_unlock_irqrestore(&sysinfo_snapshot_lock,flags);
	return 0;
}
/* Example #24 */
/**
 * rdma_read_gid_attr_ndev_rcu - Read GID attribute netdevice
 * which must be in UP state.
 *
 * @attr:Pointer to the GID attribute
 *
 * Returns pointer to netdevice if the netdevice was attached to GID and
 * netdevice is in UP state. Caller must hold RCU lock as this API
 * reads the netdev flags which can change while netdevice migrates to
 * different net namespace. Returns ERR_PTR with error code otherwise.
 *
 */
struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry =
			container_of(attr, struct ib_gid_table_entry, attr);
	struct ib_device *device = entry->attr.device;
	struct net_device *ndev = ERR_PTR(-ENODEV);
	u8 port_num = entry->attr.port_num;
	struct ib_gid_table *table;
	unsigned long flags;
	bool valid;

	table = rdma_gid_table(device, port_num);

	/* The table rwlock pins the validity of the GID entry; the netdev
	 * flags are read with READ_ONCE because they can change
	 * concurrently (the caller holds RCU per the contract above). */
	read_lock_irqsave(&table->rwlock, flags);
	valid = is_gid_entry_valid(table->data_vec[attr->index]);
	if (valid && attr->ndev && (READ_ONCE(attr->ndev->flags) & IFF_UP))
		ndev = attr->ndev;
	read_unlock_irqrestore(&table->rwlock, flags);
	return ndev;
}
/* Example #25 */
/*
 * mhi_client_recycle_trb - return a consumed TRB to the client's
 * transfer ring and, when the link and MHI state allow it, ring the
 * channel doorbell so the device sees the recycled element.
 *
 * Serialized against other channel users via the per-channel mutex;
 * the xfer_lock read side plus the data_pending counter bracket the
 * doorbell write against state transitions.
 *
 * Returns the status of the last ring operation performed.
 */
MHI_STATUS mhi_client_recycle_trb(mhi_client_handle *client_handle)
{
	unsigned long flags;
	u32 chan = client_handle->chan;
	MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
	struct mutex *chan_mutex  = &mhi_dev_ctxt->mhi_chan_mutex[chan];
	mhi_ring *local_ctxt = NULL;
	u64 db_value;
	local_ctxt = &client_handle->mhi_dev_ctxt->mhi_local_chan_ctxt[chan];

	mutex_lock(chan_mutex);
	/* Reset the TRB's length before reuse. */
	MHI_TX_TRB_SET_LEN(TX_TRB_LEN,
				(mhi_xfer_pkt *)local_ctxt->ack_rp,
				TRB_MAX_DATA_SIZE);

	/* Copy the acked element to the write pointer, then advance the
	 * ack/read pointers and re-add the element to the ring.
	 * NOTE(review): the first delete_element/ctxt_add_element return
	 * values are overwritten — only the last ring op's status is
	 * propagated; confirm this is intentional. */
	*(mhi_xfer_pkt *)local_ctxt->wp =
			*(mhi_xfer_pkt *)local_ctxt->ack_rp;
	ret_val = delete_element(local_ctxt, &local_ctxt->ack_rp,
				&local_ctxt->rp, NULL);
	ret_val = ctxt_add_element(local_ctxt, NULL);
	db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
					(uintptr_t)local_ctxt->wp);
	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	if (mhi_dev_ctxt->flags.link_up) {
		if (MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
		    MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) {
			/* Device awake in M0/M1: ring the doorbell now. */
			mhi_assert_device_wake(mhi_dev_ctxt);
			MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->channel_db_addr, chan, db_value);
		} else if (mhi_dev_ctxt->flags.pending_M3 ||
			   mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
				/* Device suspended: request a wake instead. */
				mhi_wake_dev_from_m3(mhi_dev_ctxt);
		}
	}
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->mhi_chan_cntr[chan].pkts_xferd++;
	mutex_unlock(chan_mutex);
	return ret_val;
}
/*
 * start_record_task - begin scheduler profiling: record the start
 * timestamp plus cpu0/cpu1 idle+iowait baselines, then initialize
 * per-task profiling info for every thread under the tasklist lock.
 *
 * NOTE(review): this snippet appears truncated — the matching
 * read_unlock_irqrestore(&tasklist_lock, flags) and closing brace are
 * not visible here; confirm against the full source.
 */
void start_record_task(void)
{
    unsigned long long ts;

    struct task_struct *g, *p;
    unsigned long flags;
    int cpu = 0;
    mtsched_enabled = 1;
    prof_start_ts = sched_clock();
    cpu0_idletime_start = mtprof_get_cpu_idle(cpu);// cpu'0', notified SMP
    cpu0_iowait_start = mtprof_get_cpu_iowait(cpu); 
#ifdef CONFIG_SMP
	cpu1_idletime_start = mtprof_get_cpu_idle(1);
	cpu1_iowait_start = mtprof_get_cpu_iowait(1);
#endif
    ts = sched_clock();
//    for_each_online_cpu(cpu){
	read_lock_irqsave(&tasklist_lock, flags);
	/* Seed profiling state for every thread at the same timestamp. */
	do_each_thread(g, p) {
	    setup_mtproc_info(p, ts);
	} while_each_thread(g, p);
/* Example #27 */
/*
 * print_rq - seq_file helper: print a header and then every runnable
 * task currently bound to @rq_cpu, under the tasklist read lock.
 */
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	unsigned long flags;
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	read_lock_irqsave(&tasklist_lock, flags);
	for_each_process_thread(g, p) {
		/* Only tasks runnable on this CPU are of interest. */
		if (p->on_rq && task_cpu(p) == rq_cpu)
			print_task(m, rq, p);
	}
	read_unlock_irqrestore(&tasklist_lock, flags);
}
/*
 * mlx5e_fill_hwstamp - translate a raw device timestamp into
 * skb_shared_hwtstamps.  Without PTP clock support (or when no PTP
 * clock is registered) the result is left zeroed.
 */
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp,
			struct skb_shared_hwtstamps *hwts,
			u64 timestamp)
{
	memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
#if defined (HAVE_PTP_CLOCK_INFO) && (defined (CONFIG_PTP_1588_CLOCK) || defined(CONFIG_PTP_1588_CLOCK_MODULE))
	{
		unsigned long flags;
		u64 ns;

		if (!tstamp->ptp)
			return;

		/* Convert device cycles to nanoseconds under the lock. */
		read_lock_irqsave(&tstamp->lock, flags);
		ns = timecounter_cyc2time(&tstamp->clock, timestamp);
		read_unlock_irqrestore(&tstamp->lock, flags);

		hwts->hwtstamp = ns_to_ktime(ns);
	}
#endif
}
/* Example #29 */
/*
 * octeon_irq_ciu1_enable - set this core's CIU EN1 bit for @irq.
 *
 * The read lock lets concurrent enables proceed (they touch per-core
 * registers and don't conflict) while the write side, used by disable,
 * can exclude them all.
 */
static void octeon_irq_ciu1_enable(unsigned int irq)
{
	unsigned long flags;
	uint64_t en1;
	int coreid = cvmx_get_core_num();
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	/*
	 * A read lock is used here to make sure only one core is ever
	 * updating the CIU enable bits at a time.  During an enable
	 * the cores don't interfere with each other.  During a disable
	 * the write lock stops any enables that might cause a
	 * problem.
	 */
	read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	/* Read back to flush the write before releasing the lock. */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}
/* Example #30 */
/*
 * driver_filter - decide whether @dev passes the debug driver filter.
 *
 * Returns true when filtering is off, when @dev's driver matches the
 * cached current_driver, or when its driver name matches
 * current_driver_name (in which case the match is cached).
 */
static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool matched = false;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		/* Cache the resolved driver for the fast path above. */
		current_driver = drv;
		matched = true;
	}
	read_unlock_irqrestore(&driver_name_lock, flags);

	return matched;
}