Example #1
int sce_put4pop(sce_hndl_t scehndl, sce_poptask_t *poptask, int failed)
{
	unsigned long flags;
	sce_t *sce;
	lun_t *lun;
	int    lunidx;
	int    ret;

	if ((!scehndl) || (!poptask))
		return SCE_ERROR;
	sce    = (sce_t *)scehndl;
	lunidx = _lun_search(sce, poptask->lunctx);
	if (lunidx < 0)
		return SCE_ERROR;
	lun = &sce->luntbl[lunidx];

	spin_lock_irqsave(&lun->lock, flags);
	if (!failed) {
		ret = _complete_population(lun, poptask->lun_fragnum);
		atomic64_inc(&lun->stats.populations);
		atomic64_inc(&lun->stats.alloc_sctrs);
		atomic64_add(SCE_SCTRPERFRAG, &lun->stats.valid_sctrs);
	} else {
		ret = _cancel_population(lun, poptask->lun_fragnum);
	}
	spin_unlock_irqrestore(&lun->lock, flags);

	return ret;
}
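Most of the examples collected on this page follow the same pattern: a 64-bit statistics counter embedded in a per-device (or per-LUN, per-reader) structure is bumped with atomic64_inc()/atomic64_add() on the hot path and read out later with atomic64_read(). Below is a minimal kernel-style sketch of that pattern; the struct and function names are illustrative only and are not taken from any of the projects quoted here.

#include <linux/atomic.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical per-device stats block: each field is a lock-free 64-bit counter. */
struct demo_stats {
	atomic64_t events;	/* completed operations */
	atomic64_t bytes;	/* total payload handled */
};

static struct demo_stats demo = {
	.events = ATOMIC64_INIT(0),
	.bytes  = ATOMIC64_INIT(0),
};

/* Hot path: accounting for the work done needs no extra locking. */
static void demo_account(size_t len)
{
	atomic64_inc(&demo.events);
	atomic64_add(len, &demo.bytes);
}

/* Reporting path: atomic64_read() snapshots each counter individually. */
static void demo_report(void)
{
	pr_info("events=%lld bytes=%lld\n",
		(long long)atomic64_read(&demo.events),
		(long long)atomic64_read(&demo.bytes));
}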
Example #2
static netdev_tx_t ccat_eth_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct ccat_eth_priv *const priv = netdev_priv(dev);
	struct ccat_eth_dma_fifo *const fifo = &priv->tx_fifo;
	u32 addr_and_length;

	if (skb_is_nonlinear(skb)) {
		pr_warn("Non linear skb not supported -> drop frame.\n");
		atomic64_inc(&priv->tx_dropped);
		priv->kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len > sizeof(fifo->next->data)) {
		pr_warn("skb.len %llu exceeds dma buffer %llu -> drop frame.\n",
			(u64) skb->len, (u64) sizeof(fifo->next->data));
		atomic64_inc(&priv->tx_dropped);
		priv->kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (!ccat_eth_frame_sent(fifo->next)) {
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		priv->stop_queue(priv->netdev);
		return NETDEV_TX_BUSY;
	}

	/* prepare frame in DMA memory */
	fifo->next->tx_flags = cpu_to_le32(0);
	fifo->next->length = cpu_to_le16(skb->len);
	memcpy(fifo->next->data, skb->data, skb->len);

	/* Queue frame into CCAT TX-FIFO, CCAT ignores the first 8 bytes of the tx descriptor */
	addr_and_length = offsetof(struct ccat_eth_frame, length);
	addr_and_length += ((void *)fifo->next - fifo->dma.virt);
	addr_and_length += ((skb->len + CCAT_ETH_FRAME_HEAD_LEN) / 8) << 24;
	iowrite32(addr_and_length, priv->reg.tx_fifo);

	/* update stats */
	atomic64_add(skb->len, &priv->tx_bytes);

	priv->kfree_skb_any(skb);

	ccat_eth_fifo_inc(fifo);
	/* stop queue if tx ring is full */
	if (!ccat_eth_frame_sent(fifo->next)) {
		priv->stop_queue(priv->netdev);
	}
	return NETDEV_TX_OK;
}
Example #3
static inline struct tracing_map_elt *
__tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
{
	u32 idx, key_hash, test_key;
	struct tracing_map_entry *entry;

	key_hash = jhash(key, map->key_size, 0);
	if (key_hash == 0)
		key_hash = 1;
	idx = key_hash >> (32 - (map->map_bits + 1));

	while (1) {
		idx &= (map->map_size - 1);
		entry = TRACING_MAP_ENTRY(map->map, idx);
		test_key = entry->key;

		if (test_key && test_key == key_hash && entry->val &&
		    keys_match(key, entry->val->key, map->key_size)) {
			if (!lookup_only)
				atomic64_inc(&map->hits);
			return entry->val;
		}

		if (!test_key) {
			if (lookup_only)
				break;

			if (!cmpxchg(&entry->key, 0, key_hash)) {
				struct tracing_map_elt *elt;

				elt = get_free_elt(map);
				if (!elt) {
					atomic64_inc(&map->drops);
					entry->key = 0;
					break;
				}

				memcpy(elt->key, key, map->key_size);
				entry->val = elt;
				atomic64_inc(&map->hits);

				return entry->val;
			}
		}

		idx++;
	}

	return NULL;
}
Example #4
const char *call_offer_ng(bencode_item_t *input, struct callmaster *m, bencode_item_t *output, const char* addr,
		const struct sockaddr_in6 *sin)
{
	if (m->conf.max_sessions>0) {
		rwlock_lock_r(&m->hashlock);
		if (g_hash_table_size(m->callhash) >= m->conf.max_sessions) {
			rwlock_unlock_r(&m->hashlock);
			atomic64_inc(&m->totalstats.total_rejected_sess);
			atomic64_inc(&m->totalstats_interval.total_rejected_sess);
			ilog(LOG_ERROR, "Parallel session limit reached (%i)",m->conf.max_sessions);
			return "Parallel session limit reached";
		}
		rwlock_unlock_r(&m->hashlock);
	}
	return call_offer_answer_ng(input, m, output, OP_OFFER, addr, sin);
}
Example #5
struct xnet_msg *xnet_alloc_msg(u8 alloc_flag)
{
    struct xnet_msg *msg;

#ifndef USE_XNET_SIMPLE
    /* fast method */
    if (alloc_flag == XNET_MSG_CACHE)
        return NULL;
    
    /* slow method */
    if (alloc_flag != XNET_MSG_NORMAL)
        return NULL;
#endif

    msg = xzalloc(sizeof(struct xnet_msg));
    if (unlikely(!msg)) {
        hvfs_err(xnet, "xzalloc() struct xnet_msg failed\n");
        return NULL;
    }

    INIT_LIST_HEAD(&msg->list);

#ifdef USE_XNET_SIMPLE
    sem_init(&msg->event, 0, 0);
    atomic64_inc(&g_xnet_prof.msg_alloc);
    atomic_set(&msg->ref, 1);
#endif

    return msg;
}
Example #6
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *rcv;
	int length = skb->len;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}
	/* don't change ip_summed == CHECKSUM_PARTIAL, as that
	 * will cause bad checksum on forwarded packets
	 */
	if (skb->ip_summed == CHECKSUM_NONE &&
	    rcv->features & NETIF_F_RXCSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
		struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);

		u64_stats_update_begin(&stats->syncp);
		stats->bytes += length;
		stats->packets++;
		u64_stats_update_end(&stats->syncp);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
Example #7
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *rcv;
	int length = skb->len;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}

	if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
		struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);

		u64_stats_update_begin(&stats->syncp);
		stats->bytes += length;
		stats->packets++;
		u64_stats_update_end(&stats->syncp);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
Example #8
void quadd_put_sample(struct quadd_record_data *data,
		      struct quadd_iovec *vec, int vec_count)
{
	struct quadd_comm_data_interface *comm = hrt.quadd_ctx->comm;

	comm->put_sample(data, vec, vec_count);
	atomic64_inc(&hrt.counter_samples);
}
Example #9
void ceph_adjust_quota_realms_count(struct inode *inode, bool inc)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	if (inc)
		atomic64_inc(&mdsc->quotarealms_count);
	else
		atomic64_dec(&mdsc->quotarealms_count);
}
Example #10
static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
{
	int bucket = fls(time);

	if (bucket)
		bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);

	atomic64_inc(&times[bucket]);
}
Example #11
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, fpsid, exceptions;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
	atomic64_inc(&vfp_bounce_count);

	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));

	fpsid = fmrx(FPSID);
	orig_fpscr = fpscr = fmrx(FPSCR);

	if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
	    && (fpscr & FPSCR_IXE)) {
		goto emulate;
	}

	if (fpexc & FPEXC_EX) {
#ifndef CONFIG_CPU_FEROCEON
		trigger = fmrx(FPINST);
		regs->ARM_pc -= 4;
#endif
	} else if (!(fpexc & FPEXC_DEX)) {
		 vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
		goto exit;
	}

	if (fpexc & (FPEXC_EX | FPEXC_VV)) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);

		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}

	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

	/*
	 * If there isn't a second FP instruction, exit now. Note that
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
	 */
	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
		goto exit;

	barrier();
	trigger = fmrx(FPINST2);

 emulate:
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 exit:
	preempt_enable();
}
Example #12
void xnet_raw_free_msg(struct xnet_msg *msg)
{
    if (atomic_dec_return(&msg->ref) == 0) {
        /* FIXME: check whether this msg is in the cache */
        xfree(msg);
#ifdef USE_XNET_SIMPLE
        atomic64_inc(&g_xnet_prof.msg_free);
#endif
    }
}
Example #13
static netdev_tx_t ccat_eth_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct ccat_eth_priv *const priv = netdev_priv(dev);
	struct ccat_eth_fifo *const fifo = &priv->tx_fifo;

	if (skb_is_nonlinear(skb)) {
		pr_warn("Non linear skb not supported -> drop frame.\n");
		atomic64_inc(&fifo->dropped);
		priv->kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len > MAX_PAYLOAD_SIZE) {
		pr_warn("skb.len %llu exceeds dma buffer %llu -> drop frame.\n",
			(u64) skb->len, (u64) MAX_PAYLOAD_SIZE);
		atomic64_inc(&fifo->dropped);
		priv->kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (!fifo->ops->ready(fifo)) {
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		priv->stop_queue(priv->netdev);
		return NETDEV_TX_BUSY;
	}

	/* prepare frame in DMA memory */
	fifo->ops->queue.skb(fifo, skb);

	/* update stats */
	atomic64_add(skb->len, &fifo->bytes);

	priv->kfree_skb_any(skb);

	ccat_eth_fifo_inc(fifo);
	/* stop queue if tx ring is full */
	if (!fifo->ops->ready(fifo)) {
		priv->stop_queue(priv->netdev);
	}
	return NETDEV_TX_OK;
}
Example #14
File: arm.c  Project: mdamt/linux
/**
 * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
 * @kvm	The guest that we are about to run
 *
 * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
 * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
 * caches and TLBs.
 */
static void update_vttbr(struct kvm *kvm)
{
	phys_addr_t pgd_phys;
	u64 vmid;

	if (!need_new_vmid_gen(kvm))
		return;

	spin_lock(&kvm_vmid_lock);

	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
	if (!need_new_vmid_gen(kvm)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
		 * clean.
		 */
		kvm_call_hyp(__kvm_flush_vm_context);
	}

	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
	kvm->arch.vmid = kvm_next_vmid;
	kvm_next_vmid++;
	kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;

	/* update vttbr to be used with the new vmid */
	pgd_phys = virt_to_phys(kvm->arch.pgd);
	BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
	kvm->arch.vttbr = pgd_phys | vmid;

	spin_unlock(&kvm_vmid_lock);
}
Example #15
static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
{
	struct fnic *fnic = data;

	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);

	vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
	fnic_log_q_error(fnic);
	fnic_handle_link_event(fnic);

	return IRQ_HANDLED;
}
Example #16
static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
{
	struct fnic *fnic = data;
	unsigned long wq_copy_work_done = 0;

	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);

	wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);
	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
				 wq_copy_work_done,
				 1 /* unmask intr */,
				 1 /* reset intr timer */);
	return IRQ_HANDLED;
}
Example #17
void mds_forward(struct xnet_msg *msg)
{
    struct mds_fwd *mf;
    struct xnet_msg_tx *tx;
    /* FIXME: we know we are using xnet-simple, so all the receiving iovs are
     * packed into one buf, we should save the begin address here */
    
    xnet_set_auto_free(msg);

    /* sanity checking */
    if (likely(msg->xm_datacheck)) {
        tx = msg->xm_data;
        mf = msg->xm_data + tx->len + sizeof(*tx);
    } else {
        hvfs_err(mds, "Internal error, data lossing ...\n");
        goto out;
    }
#if 0
    {
        int i, pos = 0;
        char line[256];

        memset(line, 0, sizeof(line));
        pos += snprintf(line, 256, "FW request from %lx route ", tx->ssite_id);
        for (i = 0; i < ((mf->len - sizeof(*mf)) / sizeof(u32)); i++) {
            pos += snprintf(line + pos, 256 - pos, "%lx->", mf->route[i]);
        }
        pos += snprintf(line + pos, 256 - pos, "%lx(E).\n", hmo.site_id);
        hvfs_err(mds, "%s", line);
    }
#endif
    memcpy(&msg->tx, tx, sizeof(*tx));
    /* FIXME: we know there is only one iov entry */
    msg->tx.flag |= (XNET_PTRESTORE | XNET_FWD);
    msg->tx.reserved = (u64)msg->xm_data;
    msg->xm_data += sizeof(*tx);
    msg->tx.dsite_id = hmo.site_id;

    atomic64_inc(&hmo.prof.mds.forward);
    mds_fe_dispatch(msg);

    return;
out:
    xnet_free_msg(msg);
}
Example #18
static struct hone_event *__add_files(struct hone_reader *reader,
		struct hone_event *event, struct task_struct *task)
{
	struct hone_event *sk_event;
	struct files_struct *files;
	struct file *file;
	struct fdtable *fdt;
	struct socket *sock;
	struct sock *sk;
	unsigned long flags, set;
	int i, fd;
	
	if (!(files = get_files_struct(task)))
		return event;
	spin_lock_irqsave(&files->file_lock, flags);
	if (!(fdt = files_fdtable(files)))
		goto out;
	for (i = 0; (fd = i * BITS_PER_LONG) < fdt->max_fds; i++) {
		for (set = fdt->OPEN_FDS[i]; set; set >>= 1, fd++) {
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file || file->f_op != &socket_file_ops || !file->private_data)
				continue;
			sock = file->private_data;
			sk = sock->sk;
			if (!sk || (sk->sk_family != PF_INET && sk->sk_family != PF_INET6))
				continue;

			if ((sk_event = __alloc_socket_event((unsigned long) sk,
							0, task, GFP_ATOMIC))) {
				sk_event->next = event;
				event = sk_event;
				memcpy(&event->ts, &task->start_time, sizeof(event->ts));
			} else {
				atomic64_inc(&reader->info.dropped.socket);
			}
		}
	}
out:
	spin_unlock_irqrestore(&files->file_lock, flags);
	put_files_struct(files);
	return event;
}
Example #19
static void inc_stats_counter(struct statistics *stats, int type)
{
	atomic64_t *counter;

	switch(type) {
	case HONE_PROCESS:
		counter = &stats->process;
		break;
	case HONE_SOCKET:
		counter = &stats->socket;
		break;
	case HONE_PACKET:
		counter = &stats->packet;
		break;
	default:
		return;
	}
	atomic64_inc(counter);
}
Example #20
static void ccat_eth_receive(struct net_device *const dev,
			     const void *const data, const size_t len)
{
	struct sk_buff *const skb = dev_alloc_skb(len + NET_IP_ALIGN);
	struct ccat_eth_priv *const priv = netdev_priv(dev);

	if (!skb) {
		pr_info("%s() out of memory :-(\n", __FUNCTION__);
		atomic64_inc(&priv->rx_dropped);
		return;
	}
	skb->dev = dev;
	skb_reserve(skb, NET_IP_ALIGN);
	skb_copy_to_linear_data(skb, data, len);
	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, dev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	atomic64_add(len, &priv->rx_bytes);
	netif_rx(skb);
}
Example #21
static void ccat_eth_receive(struct ccat_eth_priv *const priv, const size_t len)
{
	struct sk_buff *const skb = dev_alloc_skb(len + NET_IP_ALIGN);
	struct ccat_eth_fifo *const fifo = &priv->rx_fifo;
	struct net_device *const dev = priv->netdev;

	if (!skb) {
		pr_info("%s() out of memory :-(\n", __FUNCTION__);
		atomic64_inc(&fifo->dropped);
		return;
	}
	skb->dev = dev;
	skb_reserve(skb, NET_IP_ALIGN);
	fifo->ops->queue.copy_to_skb(fifo, skb, len);
	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, dev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	atomic64_add(len, &fifo->bytes);
	netif_rx(skb);
}
Example #22
static int inline enqueue_event(struct hone_reader *reader,
		struct hone_event *event)
{
	// Ignore threads for now
	if (event->type == HONE_PROCESS && event->process.pid != event->process.tgid)
		return 0;
	// Filter out packets for local socket, if set
	if (event->type == HONE_PACKET && reader->filter_sk &&
			event->packet.sock == (unsigned long) reader->filter_sk) {
		atomic64_inc(&reader->info.filtered);
		return 0;
	}
	get_hone_event(event);
	if (ring_append(&reader->ringbuf, event)) {
		inc_stats_counter(&reader->info.dropped, event->type);
		put_hone_event(event);
		return 0;
	}
	return 1;
}
Example #23
void xnet_free_msg(struct xnet_msg *msg)
{
    if (!msg)
        return;
    
    if (atomic_dec_return(&msg->ref) > 0) {
        return;
    }
    /* Note that, change reqno to zero to prohibit the current access to the
     * xnet_msg by xnet_handle_tx() */
    msg->tx.reqno = 0;

    /* FIXME: we should check the alloc_flag and auto free flag */
    if (msg->pair)
        xnet_free_msg(msg->pair);
    if (unlikely(msg->tx.flag & XNET_PTRESTORE)) {
        msg->xm_data = (void *)msg->tx.reserved;
    }
    if (msg->tx.flag & XNET_NEED_DATA_FREE) {
        if (msg->tx.type == XNET_MSG_REQ) {
            /* check and free the siov */
            xnet_msg_free_sdata(msg);
            xnet_msg_free_rdata(msg);
        } else if (msg->tx.type == XNET_MSG_RPY) {
            /* check and free the riov */
            xnet_msg_free_sdata(msg);
            xnet_msg_free_rdata(msg);
        } else {
            /* FIXME: do we need to free the data region */
        }
    } else {
        if (msg->siov)
            xfree(msg->siov);
        if (msg->riov)
            xfree(msg->riov);
    }
    xfree(msg);
#ifdef USE_XNET_SIMPLE
    atomic64_inc(&g_xnet_prof.msg_free);
#endif
}
Example #24
static struct hone_event *add_current_tasks(
		struct hone_reader *reader, struct hone_event *event)
{
	struct hone_event *proc_event;
	struct task_struct *task;

	rcu_read_lock();
	for (task = &init_task; (task = prev_task(task)) != &init_task; ) {
		if (task->flags & PF_EXITING)
			continue;
		event = __add_files(reader, event, task);
		if ((proc_event = __alloc_process_event(task,
						task->flags & PF_FORKNOEXEC ? PROC_FORK : PROC_EXEC,
						GFP_ATOMIC))) {
			proc_event->next = event;
			event = proc_event;
			memcpy(&event->ts, &task->start_time, sizeof(event->ts));
		} else {
			atomic64_inc(&reader->info.dropped.process);
		}
	}
	rcu_read_unlock();
	return event;
}
Example #25
static irqreturn_t fnic_isr_legacy(int irq, void *data)
{
	struct fnic *fnic = data;
	u32 pba;
	unsigned long work_done = 0;

	pba = vnic_intr_legacy_pba(fnic->legacy_pba);
	if (!pba)
		return IRQ_NONE;

	fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);

	if (pba & (1 << FNIC_INTX_NOTIFY)) {
		vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
		fnic_handle_link_event(fnic);
	}

	if (pba & (1 << FNIC_INTX_ERR)) {
		vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]);
		fnic_log_q_error(fnic);
	}

	if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
		work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
		work_done += fnic_wq_cmpl_handler(fnic, -1);
		work_done += fnic_rq_cmpl_handler(fnic, -1);

		vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ],
					 work_done,
					 1 /* unmask intr */,
					 1 /* reset intr timer */);
	}

	return IRQ_HANDLED;
}
Example #26
/*
 * Package up a bounce condition.
 */
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, fpsid, exceptions;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
	atomic64_inc(&vfp_bounce_count);

	/*
	 * At this point, FPEXC can have the following configuration:
	 *
	 *  EX DEX IXE
	 *  0   1   x   - synchronous exception
	 *  1   x   0   - asynchronous exception
	 *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
	 *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
	 *                implementation), undefined otherwise
	 *
	 * Clear various bits and enable access to the VFP so we can
	 * handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));

	fpsid = fmrx(FPSID);
	orig_fpscr = fpscr = fmrx(FPSCR);

	/*
	 * Check for the special VFP subarch 1 and FPSCR.IXE bit case
	 */
	if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
	    && (fpscr & FPSCR_IXE)) {
		/*
		 * Synchronous exception, emulate the trigger instruction
		 */
		goto emulate;
	}

	if (fpexc & FPEXC_EX) {
#ifndef CONFIG_CPU_FEROCEON
		/*
		 * Asynchronous exception. The instruction is read from FPINST
		 * and the interrupted instruction has to be restarted.
		 */
		trigger = fmrx(FPINST);
		regs->ARM_pc -= 4;
#endif
	} else if (!(fpexc & FPEXC_DEX)) {
		/*
		 * Illegal combination of bits. It can be caused by an
		 * unallocated VFP instruction but with FPSCR.IXE set and not
		 * on VFP subarch 1.
		 */
		 vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
		goto exit;
	}

	/*
	 * Modify fpscr to indicate the number of iterations remaining.
	 * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
	 * whether FPEXC.VECITR or FPSCR.LEN is used.
	 */
	if (fpexc & (FPEXC_EX | FPEXC_VV)) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);

		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}

	/*
	 * Handle the first FP instruction.  We used to take note of the
	 * FPEXC bounce reason, but this appears to be unreliable.
	 * Emulate the bounced instruction instead.
	 */
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

	/*
	 * If there isn't a second FP instruction, exit now. Note that
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
	 */
	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
		goto exit;

	/*
	 * The barrier() here prevents fpinst2 being read
	 * before the condition above.
	 */
	barrier();
	trigger = fmrx(FPINST2);

 emulate:
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 exit:
	preempt_enable();
}
Example #27
int mds_do_forward(struct xnet_msg *msg, u64 dsite)
{
    int err = 0, i, relaied = 0, looped = 0;
    
    /* Note that lots of forward request may incur the system performance, we
     * should do fast forwarding and fast bitmap changing. */
    struct mds_fwd *mf = NULL, *rmf = NULL;
    struct xnet_msg *fmsg;

    if (unlikely(msg->tx.flag & XNET_FWD)) {
        atomic64_inc(&hmo.prof.mds.loop_fwd);
        /* check if this message is looped. if it is looped, we should refresh
         * the bitmap and just forward the message as normal. until receive
         * the second looped request, we stop or slow down the request */
        rmf = (struct mds_fwd *)((void *)(msg->tx.reserved) + 
                                 msg->tx.len + sizeof(msg->tx));
        looped = __mds_fwd_loop_detect(rmf, dsite);
        
        if (unlikely((atomic64_read(&hmo.prof.mds.loop_fwd) + 1) % 
                     MAX_RELAY_FWD == 0)) {
            /* we should trigger the bitmap reload now */
            mds_bitmap_refresh(msg->xm_data);
        }
        relaied = 1;
    }

    mf = xzalloc(sizeof(*mf) + MDS_FWD_MAX * sizeof(u32));
    if (!mf) {
        hvfs_err(mds, "alloc mds_fwd failed.\n");
        err = -ENOMEM;
        goto out;
    }
    mf->len = MDS_FWD_MAX * sizeof(u32) + sizeof(*mf);
    switch (looped) {
    case 0:
        /* not looped request */
        mf->route[0] = hmo.site_id;
        break;
    case 1:        
        /* first loop, copy the entries */
        au_handle_split_sync();
        for (i = 0; i < MDS_FWD_MAX; i++) {
            if (rmf->route[i] != 0)
                mf->route[i] = rmf->route[i];
            else
                break;
        }
        if (i < MDS_FWD_MAX)
            mf->route[i] = hmo.site_id;
        break;
    case 2:
        /* second loop, slow down the forwarding */
        au_handle_split_sync();
        for (i = 0; i < MDS_FWD_MAX; i++) {
            if (rmf->route[i] != 0)
                mf->route[i] = rmf->route[i];
            else
                break;
        }
        if (i < MDS_FWD_MAX)
            mf->route[i] = hmo.site_id;
        break;
    default:;
    }

    fmsg = xnet_alloc_msg(XNET_MSG_CACHE);
    if (!fmsg) {
        hvfs_err(mds, "xnet_alloc_msg() failed, we should retry!\n");
        err = -ENOMEM;
        goto out_free;
    }

#ifdef XNET_EAGER_WRITEV
    xnet_msg_add_sdata(fmsg, &fmsg->tx, sizeof(fmsg->tx));
#endif
    xnet_msg_set_err(fmsg, err);
    xnet_msg_fill_tx(fmsg, XNET_MSG_REQ, 0, hmo.site_id, dsite);
    xnet_msg_fill_cmd(fmsg, HVFS_MDS2MDS_FWREQ, 0, 0);
    xnet_msg_add_sdata(fmsg, &msg->tx, sizeof(msg->tx));

    if (msg->xm_datacheck) {
        if (unlikely(relaied)) {
            xnet_msg_add_sdata(fmsg, msg->xm_data, msg->tx.len);
        } else {
            for (i = 0; i < msg->riov_ulen; i++) {
                xnet_msg_add_sdata(fmsg, msg->riov[i].iov_base, 
                                   msg->riov[i].iov_len);
            }
        }
    }

    /* piggyback the route info @ the last iov entry */
    xnet_msg_add_sdata(fmsg, mf, mf->len);

    err = xnet_send(hmo.xc, fmsg);

    if (err) {
        hvfs_err(mds, "Forwarding the request to %lx failed w/ %d.\n",
                 dsite, err);
    }

    /* cleaning */
    xnet_clear_auto_free(fmsg);
    xnet_free_msg(fmsg);
    
out_free:
    xfree(mf);
out:
    return err;
}
Example #28
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occur. When supporting
				    nilfs_init_acl(), proper cancellation of
				    above jobs should be considered */

	return inode;

 failed_acl:
 failed_bmap:
	clear_nlink(inode);
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}
Example #29
static __init void test_atomic64(void)
{
	long long v0 = 0xaaa31337c001d00dLL;
	long long v1 = 0xdeadbeefdeafcafeLL;
	long long v2 = 0xfaceabadf00df001LL;
	long long onestwos = 0x1111111122222222LL;
	long long one = 1LL;

	atomic64_t v = ATOMIC64_INIT(v0);
	long long r = v0;
	BUG_ON(v.counter != r);

	atomic64_set(&v, v1);
	r = v1;
	BUG_ON(v.counter != r);
	BUG_ON(atomic64_read(&v) != r);

	TEST(64, add, +=, onestwos);
	TEST(64, add, +=, -one);
	TEST(64, sub, -=, onestwos);
	TEST(64, sub, -=, -one);
	TEST(64, or, |=, v1);
	TEST(64, and, &=, v1);
	TEST(64, xor, ^=, v1);
	TEST(64, andnot, &= ~, v1);

	RETURN_FAMILY_TEST(64, add_return, +=, onestwos);
	RETURN_FAMILY_TEST(64, add_return, +=, -one);
	RETURN_FAMILY_TEST(64, sub_return, -=, onestwos);
	RETURN_FAMILY_TEST(64, sub_return, -=, -one);

	FETCH_FAMILY_TEST(64, fetch_add, +=, onestwos);
	FETCH_FAMILY_TEST(64, fetch_add, +=, -one);
	FETCH_FAMILY_TEST(64, fetch_sub, -=, onestwos);
	FETCH_FAMILY_TEST(64, fetch_sub, -=, -one);

	FETCH_FAMILY_TEST(64, fetch_or,  |=, v1);
	FETCH_FAMILY_TEST(64, fetch_and, &=, v1);
	FETCH_FAMILY_TEST(64, fetch_andnot, &= ~, v1);
	FETCH_FAMILY_TEST(64, fetch_xor, ^=, v1);

	INIT(v0);
	atomic64_inc(&v);
	r += one;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_dec(&v);
	r -= one;
	BUG_ON(v.counter != r);

	INC_RETURN_FAMILY_TEST(64, v0);
	DEC_RETURN_FAMILY_TEST(64, v0);

	XCHG_FAMILY_TEST(64, v0, v1);
	CMPXCHG_FAMILY_TEST(64, v0, v1, v2);

	INIT(v0);
	BUG_ON(atomic64_add_unless(&v, one, v0));
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(!atomic64_add_unless(&v, one, v1));
	r += one;
	BUG_ON(v.counter != r);

	INIT(onestwos);
	BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
	r -= one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_dec_if_positive(&v) != -one);
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
	BUG_ON(v.counter != r);

	INIT(onestwos);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_inc_not_zero(&v));
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);
}
Example #30
static __init int test_atomic64(void)
{
	long long v0 = 0xaaa31337c001d00dLL;
	long long v1 = 0xdeadbeefdeafcafeLL;
	long long v2 = 0xfaceabadf00df001LL;
	long long onestwos = 0x1111111122222222LL;
	long long one = 1LL;

	atomic64_t v = ATOMIC64_INIT(v0);
	long long r = v0;
	BUG_ON(v.counter != r);

	atomic64_set(&v, v1);
	r = v1;
	BUG_ON(v.counter != r);
	BUG_ON(atomic64_read(&v) != r);

	INIT(v0);
	atomic64_add(onestwos, &v);
	r += onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_add(-one, &v);
	r += -one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r += onestwos;
	BUG_ON(atomic64_add_return(onestwos, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	r += -one;
	BUG_ON(atomic64_add_return(-one, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_sub(onestwos, &v);
	r -= onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_sub(-one, &v);
	r -= -one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= onestwos;
	BUG_ON(atomic64_sub_return(onestwos, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= -one;
	BUG_ON(atomic64_sub_return(-one, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_inc(&v);
	r += one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r += one;
	BUG_ON(atomic64_inc_return(&v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_dec(&v);
	r -= one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= one;
	BUG_ON(atomic64_dec_return(&v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_xchg(&v, v1) != v0);
	r = v1;
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
	r = v1;
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_add_unless(&v, one, v0));
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(!atomic64_add_unless(&v, one, v1));
	r += one;
	BUG_ON(v.counter != r);

#ifdef CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
	INIT(onestwos);
	BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
	r -= one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_dec_if_positive(&v) != -one);
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
	BUG_ON(v.counter != r);
#else
#warning Please implement atomic64_dec_if_positive for your architecture and select the above Kconfig symbol
#endif

	INIT(onestwos);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_inc_not_zero(&v));
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);

#ifdef CONFIG_X86
	pr_info("passed for %s platform %s CX8 and %s SSE\n",
#ifdef CONFIG_X86_64
		"x86-64",
#elif defined(CONFIG_X86_CMPXCHG64)
		"i586+",
#else
		"i386+",
#endif
	       boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
	       boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
#else
	pr_info("passed\n");
#endif

	return 0;
}
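Both versions of test_atomic64() above (Examples #29 and #30) use an INIT() helper macro that is not part of the excerpts. Its effect is visible from the surrounding code: it resets the atomic under test v and the shadow value r to the same starting value before each case. A sketch of what such a helper looks like, assuming the local variables v and r declared in the test functions above:

/* Reset the atomic under test and the reference value to the same start value.
 * Assumes an atomic64_t v and a long long r in the enclosing scope, as in the
 * test functions shown above. */
#define INIT(val)				\
	do {					\
		atomic64_set(&v, val);		\
		r = val;			\
	} while (0)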