/*
 * Module ioctl interface.
 */
long lttngprofile_module_ioctl(
    struct file *file, unsigned int cmd, unsigned long arg)
{
  u32 hash;
  struct process_key_t key;
  struct process_val_t *val;
  struct lttngprofile_module_msg msg;
  struct task_struct *task = get_current();
  int ret = 0;
  void __user *umsg = (void __user *) arg;

  if (cmd != LTTNGPROFILE_MODULE_IOCTL)
    return -ENOIOCTLCMD;

  if (copy_from_user(&msg, umsg, sizeof(struct lttngprofile_module_msg)))
    return -EFAULT;

  key.tgid = task->tgid;
  hash = jhash(&key, sizeof(key), 0);

  switch (msg.cmd) {
  case LTTNGPROFILE_MODULE_REGISTER:
    /* check if already registered */
    rcu_read_lock();
    val = find_process(&key, hash);
    if (val) {
      rcu_read_unlock();
      break;
    }
    rcu_read_unlock();
    /* do registration */
    val = kzalloc(sizeof(struct process_val_t), GFP_KERNEL);
    if (!val)
      return -ENOMEM;
    val->tgid = key.tgid;
    val->latency_threshold = msg.latency_threshold;
    hash_add_rcu(process_map, &val->hlist, hash);
    printk(KERN_INFO "lttngprofile_module_ioctl register %d\n", task->tgid);
    break;
  case LTTNGPROFILE_MODULE_UNREGISTER:
    process_unregister(task->tgid);
    break;
  default:
    ret = -ENOTSUPP;
    break;
  }

  return ret;
}
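
From userspace, registration goes through ioctl() on the module's character device. A minimal sketch of that call is below; the device path /dev/lttngprofile is an assumption, while LTTNGPROFILE_MODULE_IOCTL, LTTNGPROFILE_MODULE_REGISTER and struct lttngprofile_module_msg are the names used by the handler above and would come from the module's shared header.

/*
 * Illustrative userspace registration sketch.  The device node name
 * "/dev/lttngprofile" is an assumption; the LTTNGPROFILE_* macros and
 * struct lttngprofile_module_msg are expected to come from the module's
 * UAPI header.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int lttngprofile_register(uint64_t latency_threshold)
{
  struct lttngprofile_module_msg msg = {
    .cmd = LTTNGPROFILE_MODULE_REGISTER,
    .latency_threshold = latency_threshold,
  };
  int fd = open("/dev/lttngprofile", O_RDWR);  /* assumed device node */
  int ret;

  if (fd < 0)
    return -1;
  ret = ioctl(fd, LTTNGPROFILE_MODULE_IOCTL, &msg);
  close(fd);
  return ret;
}
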
/*
 * Probes.
 */
static void syscall_entry_probe(
    void *__data, struct pt_regs *regs, long id)
{
  u32 hash;
  struct process_val_t *process_val;
  struct thread_key_t thread_key;
  struct thread_val_t *thread_val;
  struct task_struct *task = get_current();
  uint64_t sys_entry_ts = trace_clock_read64();

  // Check whether the process is registered to receive signals.
  rcu_read_lock();
  process_val = find_current_process();

  if (process_val == NULL) {
    rcu_read_unlock();
    return;
  }

  // Keep track of the timestamp of this syscall entry.
  thread_key.pid = task->pid;
  hash = jhash(&thread_key, sizeof(thread_key), 0);
  thread_val = find_thread(&thread_key, hash);

  if (thread_val != NULL) {
    thread_val->sys_entry_ts = sys_entry_ts;
    thread_val->sys_id = id;
    rcu_read_unlock();
    return;
  }

  rcu_read_unlock();

  thread_val = kzalloc(sizeof(struct thread_val_t), GFP_KERNEL);
  if (!thread_val)
    return;
  thread_val->pid = task->pid;
  thread_val->sys_entry_ts = sys_entry_ts;
  thread_val->sys_id = id;
  hash_add_rcu(thread_map, &thread_val->hlist, hash);
}
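
The find_process() and find_thread() helpers called above are not part of this excerpt. Below is a minimal sketch of how such lookups are typically written with hash_for_each_possible_rcu(), using the table and field names from the code above (the module's actual helpers may differ); callers are expected to hold rcu_read_lock().

/* Sketch of the lookup helpers; callers must hold rcu_read_lock(). */
static struct process_val_t *find_process(struct process_key_t *key, u32 hash)
{
  struct process_val_t *val;

  /* Walk only the bucket selected by hash. */
  hash_for_each_possible_rcu(process_map, val, hlist, hash) {
    if (val->tgid == key->tgid)
      return val;
  }
  return NULL;
}

static struct thread_val_t *find_thread(struct thread_key_t *key, u32 hash)
{
  struct thread_val_t *val;

  hash_for_each_possible_rcu(thread_map, val, hlist, hash) {
    if (val->pid == key->pid)
      return val;
  }
  return NULL;
}
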
Example #3
/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 * @egress:	NFP netdev is the egress.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (flow_pay) {
		/* Ignore as duplicate if it has been added by different cb. */
		if (flow_pay->ingress_offload && egress)
			return 0;
		else
			return -EOPNOTSUPP;
	}

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer, egress);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	flow_pay->ingress_dev = egress ? NULL : netdev;

	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
					    tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay,
					flow_pay->ingress_dev);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_xmit_flow(netdev, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_destroy_flow;

	INIT_HLIST_NODE(&flow_pay->link);
	flow_pay->tc_flower_cookie = flow->cookie;
	hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
	port->tc_offload_cnt++;

	/* The key layer scratch data is no longer needed; the flow payload
	 * itself is freed when the flower rule is destroyed.
	 */
	kfree(key_layer);

	return 0;

err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}
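
For contrast with the hash_add_rcu() insertion above, here is a generic removal sketch. It is not the driver's actual delete path: it assumes struct nfp_fl_payload embeds a struct rcu_head named rcu, and it omits freeing the payload's data buffers.

/*
 * Generic removal sketch for an entry inserted with hash_add_rcu() (not the
 * driver's real delete path).  Assumes struct nfp_fl_payload embeds a
 * struct rcu_head called "rcu" so the memory is only reclaimed after all
 * RCU readers that might still see the hash link have finished.  Freeing
 * the action/mask/unmasked buffers is omitted here.
 */
static void example_flow_remove(struct nfp_fl_payload *flow_pay)
{
	hash_del_rcu(&flow_pay->link);	/* unlink from priv->flow_table */
	kfree_rcu(flow_pay, rcu);	/* free after a grace period */
}
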
Example #4
/**
 * ixgbe_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int checked, match, first;
	u16 sa_idx;
	int ret;
	int i;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto == IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		/* The HW does not have a 1:1 mapping from keys to IP addrs, so
		 * check for a matching IP addr entry in the table.  If the addr
		 * already exists, use it; else find an unused slot and add the
		 * addr.  If one does not exist and there are no unused table
		 * entries, fail the request.
		 */

		/* Find an existing match or first not used, and stop looking
		 * after we've checked all we know we have.
		 */
		checked = 0;
		match = -1;
		first = -1;
		for (i = 0;
		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
		     (checked < ipsec->num_rx_sa || first < 0);
		     i++) {
			if (ipsec->ip_tbl[i].used) {
				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
					match = i;
					break;
				}
				checked++;
			} else if (first < 0) {
				first = i;  /* track the first empty seen */
			}
		}

		if (ipsec->num_rx_sa == 0)
			first = 0;

		if (match >= 0) {
			/* addrs are the same, we should use this one */
			rsa.iptbl_ind = match;
			ipsec->ip_tbl[match].ref_cnt++;

		} else if (first >= 0) {
			/* no matches, but here's an empty slot */
			rsa.iptbl_ind = first;

			memcpy(ipsec->ip_tbl[first].ipaddr,
			       rsa.ipaddr, sizeof(rsa.ipaddr));
			ipsec->ip_tbl[first].ref_cnt = 1;
			ipsec->ip_tbl[first].used = true;

			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);

		} else {
			/* no match and no empty slot */
			netdev_err(dev, "No space for SA in Rx IP SA table\n");
			memset(&rsa, 0, sizeof(rsa));
			return -ENOSPC;
		}

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto == IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
				      rsa.salt, rsa.mode, rsa.iptbl_ind);
		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto == IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	/* enable the engine if not already warmed up */
	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
		ixgbe_ipsec_start_engine(adapter);
		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
	}

	return 0;
}
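
Because the Rx SAs are hashed by SPI (the hash_add_rcu() above), the receive path can resolve a packet's SPI with a single bucket walk. A simplified sketch of such a lookup follows; it is not the driver's full Rx handling, which also matches protocol and destination address.

/*
 * Simplified sketch of the Rx-side lookup enabled by the hashing above:
 * check whether a received SPI matches a programmed Rx SA.  The driver's
 * real Rx path does more (protocol and destination-address checks).
 */
static bool example_rx_sa_exists(struct ixgbe_ipsec *ipsec, __be32 spi)
{
	struct rx_sa *rsa;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi) {
		if (rsa->used && rsa->xs->id.spi == spi) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}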