Code Example #1
File: l2tp_eth.c Project: 7799/linux
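/* Transmit hook for the L2TP Ethernet pseudo-device: hand the skb to the
 * L2TP session and account bytes/packets atomically, counting a drop when
 * l2tp_xmit_skb() does not report NET_XMIT_SUCCESS.
 */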
static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct l2tp_eth *priv = netdev_priv(dev);
	struct l2tp_session *session = priv->session;
	unsigned int len = skb->len;
	int ret = l2tp_xmit_skb(session, skb, session->hdr_len);

	if (likely(ret == NET_XMIT_SUCCESS)) {
		atomic_long_add(len, &priv->tx_bytes);
		atomic_long_inc(&priv->tx_packets);
	} else {
		atomic_long_inc(&priv->tx_dropped);
	}
	return NETDEV_TX_OK;
}
Code Example #2
File: modanalizer.c Project: Cierpliwy/modanalizer
/* --------------------------- Kprobes handler ----------------------------- */
static int ma_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct ma_symbol_data *data =
                container_of(p, struct ma_symbol_data, kp);
        atomic_long_inc(&data->calls);
        return 0;
}
Code Example #3
File: ima_api.c Project: ReneNyffenegger/linux
/*
 * ima_add_violation - add violation to measurement list.
 *
 * Violations are flagged in the measurement list with zero hash values.
 * By extending the PCR with 0xFF's instead of with zeroes, the PCR
 * value is invalidated.
 */
void ima_add_violation(struct file *file, const unsigned char *filename,
		       struct integrity_iint_cache *iint,
		       const char *op, const char *cause)
{
	struct ima_template_entry *entry;
	struct inode *inode = file_inode(file);
	struct ima_event_data event_data = {iint, file, filename, NULL, 0,
					    cause};
	int violation = 1;
	int result;

	/* can overflow, only indicator */
	atomic_long_inc(&ima_htable.violations);

	result = ima_alloc_init_template(&event_data, &entry);
	if (result < 0) {
		result = -ENOMEM;
		goto err_out;
	}
	result = ima_store_template(entry, violation, inode,
				    filename, CONFIG_IMA_MEASURE_PCR_IDX);
	if (result < 0)
		ima_free_template_entry(entry);
err_out:
	integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
			    op, cause, result, 0);
}
Code Example #4
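/* Accumulate per-operation statistics: bump the call count, add the elapsed
 * clocks to the running total, and track the observed maximum.
 */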
static void update_mcs_stats(enum mcs_op op, unsigned long clks)
{
	atomic_long_inc(&mcs_op_statistics[op].count);
	atomic_long_add(clks, &mcs_op_statistics[op].total);
	if (mcs_op_statistics[op].max < clks)
		mcs_op_statistics[op].max = clks;
}
Code Example #5
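/* Variant of the helper above that converts clocks to nanoseconds via
 * CLKS2NSEC() before accumulating the total and maximum.
 */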
static void update_mcs_stats(enum mcs_op op, unsigned long clks)
{
	unsigned long nsec;

	nsec = CLKS2NSEC(clks);
	atomic_long_inc(&mcs_op_statistics[op].count);
	atomic_long_add(nsec, &mcs_op_statistics[op].total);
	if (mcs_op_statistics[op].max < nsec)
		mcs_op_statistics[op].max = nsec;
}
Code Example #6
File: tnc_misc.c Project: CSCLOG/beaglebone
/**
 * ubifs_load_znode - load znode to TNC cache.
 * @c: UBIFS file-system description object
 * @zbr: znode branch
 * @parent: znode's parent
 * @iip: index in parent
 *
 * This function loads the znode pointed to by @zbr into the TNC cache and
 * returns a pointer to it on success or a negative error code on failure.
 */
struct ubifs_znode *ubifs_load_znode(struct ubifs_info *c,
				     struct ubifs_zbranch *zbr,
				     struct ubifs_znode *parent, int iip)
{
	int err;
	struct ubifs_znode *znode;

	ubifs_assert(!zbr->znode);
	/*
	 * A slab cache is not presently used for znodes because the znode size
	 * depends on the fanout which is stored in the superblock.
	 */
	znode = kzalloc(c->max_znode_sz, GFP_NOFS);
	if (!znode)
		return ERR_PTR(-ENOMEM);

	err = read_znode(c, zbr->lnum, zbr->offs, zbr->len, znode);
	if (err)
		goto out;

	atomic_long_inc(&c->clean_zn_cnt);

	/*
	 * Increment the global clean znode counter as well. It is OK that
	 * global and per-FS clean znode counters may be inconsistent for some
	 * short time (because we might be preempted at this point), the global
	 * one is only used in shrinker.
	 */
	atomic_long_inc(&ubifs_clean_zn_cnt);

	zbr->znode = znode;
	znode->parent = parent;
	znode->time = get_seconds();
	znode->iip = iip;

	return znode;

out:
	kfree(znode);
	return ERR_PTR(err);
}
Code Example #7
File: l2tp_eth.c Project: AllenWeb/linux
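/* Older variant of the transmit hook: the tx counters are updated
 * unconditionally and the return value of l2tp_xmit_skb() is not checked.
 */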
static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct l2tp_eth *priv = netdev_priv(dev);
	struct l2tp_session *session = priv->session;

	atomic_long_add(skb->len, &priv->tx_bytes);
	atomic_long_inc(&priv->tx_packets);

	l2tp_xmit_skb(session, skb, session->hdr_len);

	return NETDEV_TX_OK;
}
Code Example #8
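/* Take an extra reference on the source io_context, release the old
 * destination reference, and make *pdst point at the source.
 */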
void copy_io_context(struct io_context **pdst, struct io_context **psrc)
{
	struct io_context *src = *psrc;
	struct io_context *dst = *pdst;

	if (src) {
		BUG_ON(atomic_long_read(&src->refcount) == 0);
		atomic_long_inc(&src->refcount);
		put_io_context(dst);
		*pdst = src;
	}
}
Code Example #9
File: l2tp_eth.c Project: 7799/linux
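/* Receive path for the L2TP Ethernet pseudo-device: optionally hex-dump the
 * frame for debugging, reset skb metadata, and forward it to the virtual
 * device, updating the rx byte/packet or error counters accordingly.
 */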
static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
{
	struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
	struct net_device *dev = spriv->dev;
	struct l2tp_eth *priv = netdev_priv(dev);

	if (session->debug & L2TP_MSG_DATA) {
		unsigned int length;

		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		pr_debug("%s: eth recv\n", session->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
	}

	if (!pskb_may_pull(skb, ETH_HLEN))
		goto error;

	secpath_reset(skb);

	/* checksums verified by L2TP */
	skb->ip_summed = CHECKSUM_NONE;

	skb_dst_drop(skb);
	nf_reset(skb);

	if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
		atomic_long_inc(&priv->rx_packets);
		atomic_long_add(data_len, &priv->rx_bytes);
	} else {
		atomic_long_inc(&priv->rx_errors);
	}
	return;

error:
	atomic_long_inc(&priv->rx_errors);
	kfree_skb(skb);
}
Code Example #10
File: vport.c Project: Flipkart/linux
/**
 *	ovs_vport_record_error - indicate device error to generic stats layer
 *
 * @vport: vport that encountered the error
 * @err_type: one of enum vport_err_type types to indicate the error type
 *
 * If using the vport generic stats layer indicate that an error of the given
 * type has occurred.
 */
static void ovs_vport_record_error(struct vport *vport,
				   enum vport_err_type err_type)
{
	switch (err_type) {
	case VPORT_E_RX_DROPPED:
		atomic_long_inc(&vport->err_stats.rx_dropped);
		break;

	case VPORT_E_RX_ERROR:
		atomic_long_inc(&vport->err_stats.rx_errors);
		break;

	case VPORT_E_TX_DROPPED:
		atomic_long_inc(&vport->err_stats.tx_dropped);
		break;

	case VPORT_E_TX_ERROR:
		atomic_long_inc(&vport->err_stats.tx_errors);
		break;
	}

}
Code Example #11
// consumer function
void * consumer(void * arg) {
    int myid= *(int*)arg;
    int * data;

    ff::Barrier::instance()->doBarrier(myid);
    while(1) {
	if (q->pop((void**)&data)) {
	    printf("(%d %ld) ", myid, (long)data);
	    atomic_long_inc(&counter);
	}
	if ((long)(atomic_long_read(&counter))>= SIZE) break;
    }
    pthread_exit(NULL);
    return NULL;
}
Code Example #12
static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (n) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
Code Example #13
File: ubuffer.hpp Project: 19199883/gangof4
    // Multipush: push a batch of items.
    inline bool multipush() {
        if (buf_w->multipush(multipush_buf,MULTIPUSH_BUFFER_SIZE)) {
            mcnt=0;
            return true;
        }

        if (fixedsize) return false;
        // try to get a new buffer
        INTERNAL_BUFFER_T * t = pool.next_w(size);
        assert(t); // if (!t) return false; // EWOULDBLOCK
        buf_w = t;
        in_use_buffers++;
        buf_w->multipush(multipush_buf,MULTIPUSH_BUFFER_SIZE);
        mcnt=0;
#if defined(UBUFFER_STATS)
        atomic_long_inc(&numBuffers);
#endif
        return true;
    }
Code Example #14
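/* Time2Retain timer expiry: unless the timer was stopped or the session is
 * being reinstated, mark it expired, record a connection-timeout error in
 * the per-tiqn and per-session counters, and drop the session reference.
 */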
static void iscsit_handle_time2retain_timeout(unsigned long data)
{
	struct iscsi_session *sess = (struct iscsi_session *) data;
	struct iscsi_portal_group *tpg = sess->tpg;
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;

	spin_lock_bh(&se_tpg->session_lock);
	if (sess->time2retain_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&se_tpg->session_lock);
		return;
	}
	if (atomic_read(&sess->session_reinstatement)) {
		pr_err("Exiting Time2Retain handler because"
				" session_reinstatement=1\n");
		spin_unlock_bh(&se_tpg->session_lock);
		return;
	}
	sess->time2retain_timer_flags |= ISCSI_TF_EXPIRED;

	pr_err("Time2Retain timer expired for SID: %u, cleaning up"
			" iSCSI session.\n", sess->sid);
	{
	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

	if (tiqn) {
		spin_lock(&tiqn->sess_err_stats.lock);
		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
			(void *)sess->sess_ops->InitiatorName);
		tiqn->sess_err_stats.last_sess_failure_type =
				ISCSI_SESS_ERR_CXN_TIMEOUT;
		tiqn->sess_err_stats.cxn_timeout_errors++;
		atomic_long_inc(&sess->conn_timeout_errors);
		spin_unlock(&tiqn->sess_err_stats.lock);
	}
	}

	spin_unlock_bh(&se_tpg->session_lock);
	target_put_session(sess->se_sess);
}
Code Example #15
File: ubuffer.hpp Project: 19199883/gangof4
    /**
     *  Push Method: push the input value into the queue.\n
     *  If fixedsize has been set to \p true, this method may
     *  return false. This means EWOULDBLOCK
     *  and the call should be retried.
     *
     *  \param data Data to be pushed in the buffer
     *  \return \p false if \p fixedsize is set to \p true OR if \p data is NULL
     *  OR if there is not a buffer to write to.
     *
     *  \return \p true if the push succeeds.
     */
    inline bool push(void * const data) {
        /* NULL values cannot be pushed in the queue */
        if (!data || !buf_w) return false;

        // If fixedsize has been set to \p true, this method may
        // return false. This means EWOULDBLOCK
        if (!available()) {

            if (fixedsize) return false;

            // try to get a new buffer
            INTERNAL_BUFFER_T * t = pool.next_w(size);
            assert(t); //if (!t) return false; // EWOULDBLOCK
            buf_w = t;
            in_use_buffers++;
#if defined(UBUFFER_STATS)
            atomic_long_inc(&numBuffers);
#endif
        }
        //DBG(assert(buf_w->push(data)); return true;);
        buf_w->push(data);
        return true;
    }
Code Example #16
/**
 * scfs_readpages
 *
 * Parameters:
 * @file: upper file
 * @*mapping: address_space struct for the file
 * @*pages: list of pages to read in
 * @nr_pages: number of pages to read in
 *
 * Return:
 * SCFS_SUCCESS on success, an error code otherwise
 *
 * Description:
 * - Asynchronously read pages for readahead. A scaling number of background threads
 *   will read & decompress them in a slightly deferred but parallelized manner.
 */
static int
scfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct scfs_inode_info *sii = SCFS_I(file->f_mapping->host);
	struct scfs_sb_info *sbi = SCFS_S(file->f_mapping->host->i_sb);
	struct file *lower_file = NULL;
	struct page *page;
	struct scfs_cinfo cinfo;
	loff_t i_size;
	pgoff_t start, end;
	int page_idx, page_idx_readahead = 1024, ret = 0;
	int readahead_page = 0;
	int prev_cbi = 0;
	int prev_cluster = -1, cur_cluster = -1;
	int cluster_idx = 0;

	i_size = i_size_read(&sii->vfs_inode);
	if (!i_size) {
		SCFS_PRINT("file %s: i_size is zero, "
			"flags 0x%x sii->clust_info_size %d\n",
			file->f_path.dentry->d_name.name, sii->flags,
			sii->cinfo_array_size);
		return 0;
	}

#ifdef SCFS_ASYNC_READ_PROFILE
	atomic_add(nr_pages, &sbi->scfs_standby_readpage_count);
#endif

#ifdef SCFS_NOTIFY_RANDOM_READ
	lower_file = scfs_lower_file(file);
	if (!lower_file) {
		SCFS_PRINT_ERROR("file %s: lower file is null!\n",
		        file->f_path.dentry->d_name.name);
		return -EINVAL;
	}

	/* if the read request was random (enough), hint it to the lower file. 
	 * scfs_sequential_page_number is the tunable threshold.
	 * filemap.c will later on refer to this FMODE_RANDOM flag.
	*/
	spin_lock(&lower_file->f_lock);
	if (nr_pages > sbi->scfs_sequential_page_number)
		lower_file->f_mode &= ~FMODE_RANDOM;
	else
		lower_file->f_mode |= FMODE_RANDOM;
	spin_unlock(&lower_file->f_lock);
#endif
	lower_file = scfs_lower_file(file);
	page = list_entry(pages->prev, struct page, lru);
	cluster_idx = page->index / (sii->cluster_size / PAGE_SIZE);

	if (sii->compressed) {
		mutex_lock(&sii->cinfo_mutex);
		ret = get_cluster_info(file, cluster_idx, &cinfo);
		mutex_unlock(&sii->cinfo_mutex);
		if (ret) {
			SCFS_PRINT_ERROR("err in get_cluster_info, ret : %d,"
				"i_size %lld\n", ret, i_size);
			return ret;
		}

		if (!cinfo.size || cinfo.size > sii->cluster_size) {
			SCFS_PRINT_ERROR("file %s: cinfo is invalid, "
				"clust %u cinfo.size %u\n",
				file->f_path.dentry->d_name.name,
				cluster_idx, cinfo.size);
			return -EINVAL;
		}
		start = (pgoff_t)(cinfo.offset / PAGE_SIZE);
	} else {
		start = (pgoff_t)(cluster_idx * sii->cluster_size / PAGE_SIZE);
	}

	cluster_idx = (page->index + nr_pages - 1) / (sii->cluster_size / PAGE_SIZE);
	if (sii->compressed) {
		mutex_lock(&sii->cinfo_mutex);
		ret = get_cluster_info(file, cluster_idx, &cinfo);
		mutex_unlock(&sii->cinfo_mutex);
		if (ret) {
			SCFS_PRINT_ERROR("err in get_cluster_info, ret : %d,"
				"i_size %lld\n", ret, i_size);
			return ret;
		}

		if (!cinfo.size || cinfo.size > sii->cluster_size) {
			SCFS_PRINT_ERROR("file %s: cinfo is invalid, "
				"clust %u cinfo.size %u\n",
				file->f_path.dentry->d_name.name,
				cluster_idx, cinfo.size);
			return -EINVAL;
		}
		end = (pgoff_t)((cinfo.offset + cinfo.size -1) / PAGE_SIZE);
	} else {
		end = (pgoff_t)(((cluster_idx + 1) * sii->cluster_size - 1) / PAGE_SIZE);
		/* check upper inode size */

		/* out of range? on compressed file, it is handled returning error,
		   which one is right? */
		if (end > (i_size / PAGE_SIZE))
			end = (i_size / PAGE_SIZE);
	}
	force_page_cache_readahead(lower_file->f_mapping, lower_file,
		start, (unsigned long)(end - start +1));

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);

		if (PageReadahead(page))
			page_idx_readahead = page_idx;

		ret = add_to_page_cache_lru(page, mapping,
				      page->index, GFP_KERNEL);
		if (ret) {
			SCFS_PRINT("adding to page cache failed, "
				"page %x page->idx %d ret %d\n",
				page, page->index, ret);
			page_cache_release(page);
			continue;
		}

		/* memory buffer is full or synchronous read request -
		   call scfs_readpage to read now */
		if (sbi->page_buffer_next_filling_index_smb ==
				MAX_PAGE_BUFFER_SIZE_SMB || page_idx < page_idx_readahead) {
			cur_cluster = PAGE_TO_CLUSTER_INDEX(page, sii);

			if (prev_cluster == cur_cluster && prev_cbi > 0)
				prev_cbi = _scfs_readpage(file, page, prev_cbi - 1);
			else
				prev_cbi = _scfs_readpage(file, page, -1);

			prev_cluster = cur_cluster;
			page_cache_release(page); /* refer line 701 */
		} else {
			spin_lock(&sbi->spinlock_smb);

			/* Queue is not full so add the page into the queue.
			   Also, here we increase file->f_count to protect
			   the file structs from multi-threaded accesses */
			atomic_long_inc(&SCFS_F(file)->lower_file->f_count);
			atomic_long_inc(&file->f_count);
			sbi->page_buffer_smb[sbi->page_buffer_next_filling_index_smb] = page;
			sbi->file_buffer_smb[sbi->page_buffer_next_filling_index_smb++] = file;

			/* check whether page buffer is full and set page buffer full if needed */
			if (((sbi->page_buffer_next_filling_index_smb == MAX_PAGE_BUFFER_SIZE_SMB) &&
				sbi->page_buffer_next_io_index_smb == 0) ||
				(sbi->page_buffer_next_filling_index_smb ==
				sbi->page_buffer_next_io_index_smb))
				sbi->page_buffer_next_filling_index_smb = MAX_PAGE_BUFFER_SIZE_SMB;
			else if (sbi->page_buffer_next_filling_index_smb == MAX_PAGE_BUFFER_SIZE_SMB)
				sbi->page_buffer_next_filling_index_smb = 0;
			spin_unlock(&sbi->spinlock_smb);
			++readahead_page;
		}
		//page_cache_release(page);
	}

	if (readahead_page > 0)
		wakeup_smb_thread(sbi);

	SCFS_PRINT("<e>\n");

#ifdef SCFS_ASYNC_READ_PROFILE
	atomic_sub(nr_pages, &sbi->scfs_standby_readpage_count);
#endif
	return 0;
}
Code Example #17
File: vlan_dev.c Project: ANFS/ANFS-kernel
/*
 *	Determine the packet's protocol ID. The rule here is that we
 *	assume 802.3 if the type field is short enough to be a length.
 *	This is normal practice and works for any 'now in use' protocol.
 *
 *  Also, at this point we assume that we ARE dealing exclusively with
 *  VLAN packets, or packets that should be made into VLAN packets based
 *  on a default VLAN ID.
 *
 *  NOTE:  Should be similar to ethernet/eth.c.
 *
 *  SANITY NOTE:  This method is called when a packet is moving up the stack
 *                towards userland.  To get here, it would have already passed
 *                through the ethernet/eth.c eth_type_trans() method.
 *  SANITY NOTE 2: We are referring to the VLAN_HDR fields, which MAY be
 *                 stored UNALIGNED in memory.  RISC systems don't like
 *                 such cases very much...
 *  SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
 *  		    aligned, so there doesn't need to be any of the unaligned
 *  		    stuff.  It has been commented out now...  --Ben
 *
 */
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *ptype, struct net_device *orig_dev)
{
	struct vlan_hdr *vhdr;
	struct vlan_pcpu_stats *rx_stats;
	struct net_device *vlan_dev;
	u16 vlan_id;
	u16 vlan_tci;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	vlan_id = vlan_tci & VLAN_VID_MASK;

	rcu_read_lock();
	vlan_dev = vlan_find_dev(dev, vlan_id);

	/* If the VLAN device is defined, we use it.
	 * If not, and the VID is 0, it is a 802.1p packet (not
	 * really a VLAN), so we will just netif_rx it later to the
	 * original interface, but with the skb->proto set to the
	 * wrapped proto: we do nothing here.
	 */

	if (!vlan_dev) {
		if (vlan_id) {
			pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
				 __func__, vlan_id, dev->name);
			goto err_unlock;
		}
		rx_stats = NULL;
	} else {
		skb->dev = vlan_dev;

		rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);

		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += skb->len;

		skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);

		pr_debug("%s: priority: %u for TCI: %hu\n",
			 __func__, skb->priority, vlan_tci);

		switch (skb->pkt_type) {
		case PACKET_BROADCAST:
			/* Yeah, stats collect these together.. */
			/* stats->broadcast ++; // no such counter :-( */
			break;

		case PACKET_MULTICAST:
			rx_stats->rx_multicast++;
			break;

		case PACKET_OTHERHOST:
			/* Our lower layer thinks this is not local, let's make
			 * sure.
			 * This allows the VLAN to have a different MAC than the
			 * underlying device, and still route correctly.
			 */
			if (!compare_ether_addr(eth_hdr(skb)->h_dest,
						skb->dev->dev_addr))
				skb->pkt_type = PACKET_HOST;
			break;
		default:
			break;
		}
		u64_stats_update_end(&rx_stats->syncp);
	}

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	if (vlan_dev) {
		skb = vlan_check_reorder_header(skb);
		if (!skb) {
			rx_stats->rx_errors++;
			goto err_unlock;
		}
	}

	netif_rx(skb);

	rcu_read_unlock();
	return NET_RX_SUCCESS;

err_unlock:
	rcu_read_unlock();
err_free:
	atomic_long_inc(&dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}
Code Example #18
File: ff_wrapper.hpp Project: jzsu/fix8
	value_type operator++(int) // postfix
		{ value_type v(atomic_long_read(&_rep)); atomic_long_inc(&_rep); return v; }
Code Example #19
File: xfrm_device.c Project: krzk/linux
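/* Validate an skb for ESP offload at transmit time: ask the caller to retry
 * later if the per-CPU xfrm backlog is not empty, software-segment GSO
 * packets that were rerouted away from the offload device (counting a tx
 * drop on failure), and run the ESP type offload on each resulting segment.
 */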
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct sk_buff *skb2;
	struct softnet_data *sd;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!xo)
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	x = skb->sp->xvec[skb->sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb)) {
		struct net_device *dev = skb->dev;

		if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
			struct sk_buff *segs;

			/* Packet got rerouted, fixup features and segment it. */
			esp_features = esp_features & ~(NETIF_F_HW_ESP
							| NETIF_F_GSO_ESP);

			segs = skb_gso_segment(skb, esp_features);
			if (IS_ERR(segs)) {
				kfree_skb(skb);
				atomic_long_inc(&dev->tx_dropped);
				return NULL;
			} else {
				consume_skb(skb);
				skb = segs;
			}
		}
	}

	if (!skb->next) {
		x->outer_mode->xmit(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	skb2 = skb;

	do {
		struct sk_buff *nskb = skb2->next;
		skb2->next = NULL;

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		x->outer_mode->xmit(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;

			if (!skb)
				return NULL;

			goto skip_push;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));

skip_push:
		skb2 = nskb;
	} while (skb2);

	return skb;
}
Code Example #20
File: blk-ioc.c Project: DevSwift/LT22-kernel
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
Code Example #21
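/* Increment a qdio performance counter only when statistics collection is
 * enabled.
 */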
inline void qdio_perf_stat_inc(atomic_long_t *count)
{
	if (qdio_performance_stats)
		atomic_long_inc(count);
}
Code Example #22
/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	atomic_long_inc(&page_zone(page)->inactive_age);
}