Example #1
/* opa_vnic_process_vema_config - process vema configuration updates */
void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
{
	struct __opa_veswport_info *info = &adapter->info;
	struct rdma_netdev *rn = netdev_priv(adapter->netdev);
	u8 port_num[OPA_VESW_MAX_NUM_DEF_PORT] = { 0 };
	struct net_device *netdev = adapter->netdev;
	u8 i, port_count = 0;
	u16 port_mask;

	/* If the base_mac_addr has changed, update the interface MAC address */
	if (memcmp(info->vport.base_mac_addr, adapter->vema_mac_addr,
		   ARRAY_SIZE(info->vport.base_mac_addr))) {
		struct sockaddr saddr;

		memcpy(saddr.sa_data, info->vport.base_mac_addr,
		       ARRAY_SIZE(info->vport.base_mac_addr));
		mutex_lock(&adapter->lock);
		eth_mac_addr(netdev, &saddr);
		memcpy(adapter->vema_mac_addr,
		       info->vport.base_mac_addr, ETH_ALEN);
		mutex_unlock(&adapter->lock);
	}

	rn->set_id(netdev, info->vesw.vesw_id);

	/* Handle MTU limit change */
	rtnl_lock();
	netdev->max_mtu = max_t(unsigned int, info->vesw.eth_mtu_non_vlan,
				netdev->min_mtu);
	if (netdev->mtu > netdev->max_mtu)
		dev_set_mtu(netdev, netdev->max_mtu);
	rtnl_unlock();

	/* Update the flow-to-default-port redirection table */
	port_mask = info->vesw.def_port_mask;
	for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++) {
		if (port_mask & 1)
			port_num[port_count++] = i;
		port_mask >>= 1;
	}

	/*
	 * Build the flow table. The flow table is required when the
	 * destination LID is not available. Up to OPA_VNIC_FLOW_TBL_SIZE
	 * flows are supported. Each flow needs a default port number to
	 * get its dlid from the u_ucast_dlid array.
	 */
	for (i = 0; i < OPA_VNIC_FLOW_TBL_SIZE; i++)
		adapter->flow_tbl[i] = port_count ? port_num[i % port_count] :
						    OPA_VNIC_INVALID_PORT;

	/* Operational state can only be DROP_ALL or FORWARDING */
	if (info->vport.config_state == OPA_VNIC_STATE_FORWARDING) {
		info->vport.oper_state = OPA_VNIC_STATE_FORWARDING;
		netif_dormant_off(netdev);
	} else {
		info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
		netif_dormant_on(netdev);
	}
}
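
The flow-table build above reads more clearly in isolation. Below is a minimal standalone sketch of the same pattern, assuming made-up sizes in place of OPA_VESW_MAX_NUM_DEF_PORT, OPA_VNIC_FLOW_TBL_SIZE and OPA_VNIC_INVALID_PORT: expand the default-port bitmask into a list of enabled ports, then assign flows to those ports round-robin.

/* Standalone sketch, not driver code: sizes and the invalid-port value
 * are hypothetical stand-ins for the OPA constants.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_DEF_PORTS	16	/* stand-in for OPA_VESW_MAX_NUM_DEF_PORT */
#define FLOW_TBL_SIZE	8	/* stand-in for OPA_VNIC_FLOW_TBL_SIZE */
#define INVALID_PORT	0xff	/* stand-in for OPA_VNIC_INVALID_PORT */

int main(void)
{
	uint16_t port_mask = 0x0015;	/* default ports 0, 2 and 4 enabled */
	uint8_t port_num[MAX_DEF_PORTS] = { 0 };
	uint8_t flow_tbl[FLOW_TBL_SIZE];
	uint8_t i, port_count = 0;

	/* Collect the indices of the set bits, lowest first */
	for (i = 0; i < MAX_DEF_PORTS; i++) {
		if (port_mask & 1)
			port_num[port_count++] = i;
		port_mask >>= 1;
	}

	/* Spread flows over the enabled ports round-robin; with no enabled
	 * port, every flow maps to the invalid port.
	 */
	for (i = 0; i < FLOW_TBL_SIZE; i++)
		flow_tbl[i] = port_count ? port_num[i % port_count] :
					   INVALID_PORT;

	for (i = 0; i < FLOW_TBL_SIZE; i++)
		printf("flow %u -> port %u\n", (unsigned)i,
		       (unsigned)flow_tbl[i]);
	return 0;
}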
Example #2
File: net.c Project: avagin/linux
static int most_nd_open(struct net_device *dev)
{
	struct net_dev_context *nd = netdev_priv(dev);
	int ret = 0;

	mutex_lock(&probe_disc_mt);

	if (most_start_channel(nd->iface, nd->rx.ch_id, &comp)) {
		netdev_err(dev, "most_start_channel() failed\n");
		ret = -EBUSY;
		goto unlock;
	}

	if (most_start_channel(nd->iface, nd->tx.ch_id, &comp)) {
		netdev_err(dev, "most_start_channel() failed\n");
		most_stop_channel(nd->iface, nd->rx.ch_id, &comp);
		ret = -EBUSY;
		goto unlock;
	}

	netif_carrier_off(dev);
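	/* remain dormant until the interface has a valid MAC address */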
	if (is_valid_ether_addr(dev->dev_addr))
		netif_dormant_off(dev);
	else
		netif_dormant_on(dev);
	netif_wake_queue(dev);
	if (nd->iface->request_netinfo)
		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, on_netinfo);

unlock:
	mutex_unlock(&probe_disc_mt);
	return ret;
}
Example #3
int hdlc_cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
	const size_t size = sizeof(cisco_proto);
	cisco_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		ifr->ifr_settings.type = IF_PROTO_CISCO;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(cisco_s, &hdlc->state.cisco.settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_CISCO:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, cisco_s, size))
			return -EFAULT;

		if (new_settings.interval < 1 ||
		    new_settings.timeout < 2)
			return -EINVAL;

		result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);

		if (result)
			return result;

		hdlc_proto_detach(hdlc);
		memcpy(&hdlc->state.cisco.settings, &new_settings, size);
		memset(&hdlc->proto, 0, sizeof(hdlc->proto));

		hdlc->proto.start = cisco_start;
		hdlc->proto.stop = cisco_stop;
		hdlc->proto.netif_rx = cisco_rx;
		hdlc->proto.type_trans = cisco_type_trans;
		hdlc->proto.id = IF_PROTO_CISCO;
		dev->hard_start_xmit = hdlc->xmit;
		dev->hard_header = cisco_hard_header;
		dev->hard_header_cache = NULL;
		dev->type = ARPHRD_CISCO;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP;
		dev->addr_len = 0;
		netif_dormant_on(dev);
		return 0;
	}

	return -EINVAL;
}
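
The IF_GET_PROTO branch illustrates a common ioctl size-negotiation pattern: if the caller's buffer is smaller than the settings structure, report the size that is wanted and fail with -ENOBUFS so the caller can retry with a larger buffer. Here is a minimal userspace sketch of that pattern; the structure and function names are hypothetical, not the kernel API.

/* Standalone sketch (userspace): size negotiation as used by IF_GET_PROTO. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct proto_settings {			/* stand-in for cisco_proto */
	unsigned int interval;
	unsigned int timeout;
};

static int get_settings(const struct proto_settings *src,
			void *buf, size_t *buf_size)
{
	if (*buf_size < sizeof(*src)) {
		*buf_size = sizeof(*src);	/* data size wanted */
		return -ENOBUFS;
	}
	memcpy(buf, src, sizeof(*src));
	return 0;
}

int main(void)
{
	struct proto_settings state = { .interval = 10, .timeout = 25 };
	struct proto_settings out;
	size_t size = 0;

	/* probe with an empty buffer just to learn the required size;
	 * the copy never runs because the size check fails first
	 */
	if (get_settings(&state, NULL, &size) == -ENOBUFS)
		printf("need %zu bytes\n", size);

	/* retry with a buffer that is large enough */
	size = sizeof(out);
	if (get_settings(&state, &out, &size) == 0)
		printf("interval=%u timeout=%u\n", out.interval, out.timeout);
	return 0;
}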
Example #4
static void cisco_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	del_timer_sync(&hdlc->state.cisco.timer);
	netif_dormant_on(dev);
	hdlc->state.cisco.up = 0;
	hdlc->state.cisco.request_sent = 0;
}
Example #5
static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
	const size_t size = sizeof(cisco_proto);
	cisco_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto)
			return -EINVAL;
		ifr->ifr_settings.type = IF_PROTO_CISCO;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(cisco_s, &state(hdlc)->settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_CISCO:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, cisco_s, size))
			return -EFAULT;

		if (new_settings.interval < 1 ||
		    new_settings.timeout < 2)
			return -EINVAL;

		result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		result = attach_hdlc_protocol(dev, &proto,
					      sizeof(struct cisco_state));
		if (result)
			return result;

		memcpy(&state(hdlc)->settings, &new_settings, size);
		spin_lock_init(&state(hdlc)->lock);
		dev->hard_start_xmit = hdlc->xmit;
		dev->header_ops = &cisco_header_ops;
		dev->type = ARPHRD_CISCO;
		netif_dormant_on(dev);
		return 0;
	}

	return -EINVAL;
}
Example #6
static void cisco_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);
	unsigned long flags;

	del_timer_sync(&st->timer);

	spin_lock_irqsave(&st->lock, flags);
	netif_dormant_on(dev);
	st->up = st->txseq = 0;
	spin_unlock_irqrestore(&st->lock, flags);
}
Example #7
static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ppp *ppp;
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto)
			return -EINVAL;
		ifr->ifr_settings.type = IF_PROTO_PPP;
		return 0; /* return protocol only, no settable parameters */

	case IF_PROTO_PPP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		/* no settable parameters */

		result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		result = attach_hdlc_protocol(dev, &proto, sizeof(struct ppp));
		if (result)
			return result;

		ppp = get_ppp(dev);
		spin_lock_init(&ppp->lock);
		ppp->req_timeout = 2;
		ppp->cr_retries = 10;
		ppp->term_retries = 2;
		ppp->keepalive_interval = 10;
		ppp->keepalive_timeout = 60;

		dev->hard_header_len = sizeof(struct hdlc_header);
		dev->header_ops = &ppp_header_ops;
		dev->type = ARPHRD_PPP;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		netif_dormant_on(dev);
		return 0;
	}

	return -EINVAL;
}
Example #8
static void vlan_transfer_operstate(const struct net_device *dev,
				    struct net_device *vlandev)
{
	/* Have to respect the userspace-enforced dormant state of the real
	 * device, while still allowing a supplicant to run on the VLAN
	 * device.
	 */
	if (dev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(vlandev);
	else
		netif_dormant_off(vlandev);

	if (netif_carrier_ok(dev)) {
		if (!netif_carrier_ok(vlandev))
			netif_carrier_on(vlandev);
	} else {
		if (netif_carrier_ok(vlandev))
			netif_carrier_off(vlandev);
	}
}
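
Every example here toggles the dormant flag to influence the operational state that the interface reports. The sketch below is a simplified model of how carrier and dormant combine into that state, roughly following the kernel's default_operstate() logic but ignoring the testing and lower-layer-down cases; it is an illustration, not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

enum oper_state { OPER_DOWN, OPER_DORMANT, OPER_UP };

/* Simplified: no carrier means down; carrier with the dormant flag set
 * means the link is present but the protocol is not up yet; otherwise up.
 */
static enum oper_state compute_operstate(bool carrier_ok, bool dormant)
{
	if (!carrier_ok)
		return OPER_DOWN;
	if (dormant)
		return OPER_DORMANT;
	return OPER_UP;
}

int main(void)
{
	static const char *const names[] = { "down", "dormant", "up" };

	printf("%s\n", names[compute_operstate(false, false)]);	/* down */
	printf("%s\n", names[compute_operstate(true, true)]);		/* dormant */
	printf("%s\n", names[compute_operstate(true, false)]);		/* up */
	return 0;
}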
Example #9
static void cisco_timer(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);

	spin_lock(&st->lock);
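	/* declare the link down if no keepalive reply arrived in time */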
	if (st->up &&
	    time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) {
		st->up = 0;
		netdev_info(dev, "Link down\n");
		netif_dormant_on(dev);
	}

	cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
			     htonl(st->rxseq));
	spin_unlock(&st->lock);

	st->timer.expires = jiffies + st->settings.interval * HZ;
	st->timer.function = cisco_timer;
	st->timer.data = arg;
	add_timer(&st->timer);
}
Example #10
static void cisco_timer(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;
	hdlc_device *hdlc = dev_to_hdlc(dev);

	if (hdlc->state.cisco.up &&
	    time_after(jiffies, hdlc->state.cisco.last_poll +
		       hdlc->state.cisco.settings.timeout * HZ)) {
		hdlc->state.cisco.up = 0;
		printk(KERN_INFO "%s: Link down\n", dev->name);
		netif_dormant_on(dev);
	}

	cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ,
			     ++hdlc->state.cisco.txseq,
			     hdlc->state.cisco.rxseq);
	hdlc->state.cisco.request_sent = 1;
	hdlc->state.cisco.timer.expires = jiffies +
		hdlc->state.cisco.settings.interval * HZ;
	hdlc->state.cisco.timer.function = cisco_timer;
	hdlc->state.cisco.timer.data = arg;
	add_timer(&hdlc->state.cisco.timer);
}
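
Both timer variants use time_after() to decide that no keepalive reply arrived within the timeout. The small userspace sketch below shows the wrap-safe comparison that macro performs; the helper name and the tick values are made up for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Same comparison as the kernel's time_after(a, b): treating the
 * difference as signed keeps the test correct across counter wrap.
 */
static bool time_after_ul(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;	/* true if a is later than b */
}

int main(void)
{
	unsigned long last_poll = 1000, timeout_ticks = 250;
	unsigned long now = 1300;

	if (time_after_ul(now, last_poll + timeout_ticks))
		printf("link down: no keepalive reply within the timeout\n");
	return 0;
}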
Example #11
/* SCA: RCR+ must supply id, len and data
 * SCN: RCR- must supply code, id, len and data
 * STA: RTR must supply id
 * SCJ: RUC must supply CP packet len and data
 */
static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
			 u8 id, unsigned int len, const void *data)
{
	int old_state, action;
	struct ppp *ppp = get_ppp(dev);
	struct proto *proto = get_proto(dev, pid);

	old_state = proto->state;
	BUG_ON(old_state >= STATES);
	BUG_ON(event >= EVENTS);

#if DEBUG_STATE
	printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) %s ...\n", dev->name,
	       proto_name(pid), event_names[event], state_names[proto->state]);
#endif

	action = cp_table[event][old_state];

	proto->state = action & STATE_MASK;
	if (action & (SCR | STR)) /* set Configure-Req/Terminate-Req timer */
		mod_timer(&proto->timer, proto->timeout =
			  jiffies + ppp->req_timeout * HZ);
	if (action & ZRC)
		proto->restart_counter = 0;
	if (action & IRC)
		proto->restart_counter = (proto->state == STOPPING) ?
			ppp->term_retries : ppp->cr_retries;

	if (action & SCR)	/* send Configure-Request */
		ppp_tx_cp(dev, pid, CP_CONF_REQ, proto->cr_id = ++ppp->seq,
			  0, NULL);
	if (action & SCA)	/* send Configure-Ack */
		ppp_tx_cp(dev, pid, CP_CONF_ACK, id, len, data);
	if (action & SCN)	/* send Configure-Nak/Reject */
		ppp_tx_cp(dev, pid, code, id, len, data);
	if (action & STR)	/* send Terminate-Request */
		ppp_tx_cp(dev, pid, CP_TERM_REQ, ++ppp->seq, 0, NULL);
	if (action & STA)	/* send Terminate-Ack */
		ppp_tx_cp(dev, pid, CP_TERM_ACK, id, 0, NULL);
	if (action & SCJ)	/* send Code-Reject */
		ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data);

	if (old_state != OPENED && proto->state == OPENED) {
		printk(KERN_INFO "%s: %s up\n", dev->name, proto_name(pid));
		if (pid == PID_LCP) {
			netif_dormant_off(dev);
			ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL);
			ppp_cp_event(dev, PID_IPV6CP, START, 0, 0, 0, NULL);
			ppp->last_pong = jiffies;
			mod_timer(&proto->timer, proto->timeout =
				  jiffies + ppp->keepalive_interval * HZ);
		}
	}
	if (old_state == OPENED && proto->state != OPENED) {
		printk(KERN_INFO "%s: %s down\n", dev->name, proto_name(pid));
		if (pid == PID_LCP) {
			netif_dormant_on(dev);
			ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL);
			ppp_cp_event(dev, PID_IPV6CP, STOP, 0, 0, 0, NULL);
		}
	}
	if (old_state != CLOSED && proto->state == CLOSED)
		del_timer(&proto->timer);

#if DEBUG_STATE
	printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name,
	       proto_name(pid), event_names[event], state_names[proto->state]);
#endif
}
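
ppp_cp_event() is a table-driven state machine: cp_table[event][old_state] yields a word whose low bits (STATE_MASK) select the next state and whose remaining bits are action flags that the handler tests one by one. The sketch below shows that pattern in a reduced, self-contained form; the states, events, flags and table contents are invented for illustration and do not correspond to the PPP control protocol.

#include <stdio.h>

enum { CLOSED, STARTING, OPENED, STATES };	/* hypothetical states */
enum { EV_START, EV_UP, EV_DOWN, EVENTS };	/* hypothetical events */

#define STATE_MASK	0x0f
#define ACT_LOG		0x10	/* log the transition */
#define ACT_TIMER	0x20	/* (re)arm a timer */

/* next_state | actions, indexed by [event][old_state] */
static const unsigned int cp_table[EVENTS][STATES] = {
	[EV_START] = { STARTING | ACT_TIMER, STARTING, OPENED },
	[EV_UP]    = { CLOSED, OPENED | ACT_LOG, OPENED },
	[EV_DOWN]  = { CLOSED, CLOSED, CLOSED | ACT_LOG | ACT_TIMER },
};

static unsigned int state = CLOSED;

static void cp_event(unsigned int event)
{
	unsigned int action = cp_table[event][state];

	/* switch to the packed next state, then carry out each action */
	state = action & STATE_MASK;
	if (action & ACT_LOG)
		printf("transition logged, new state %u\n", state);
	if (action & ACT_TIMER)
		printf("timer armed\n");
}

int main(void)
{
	cp_event(EV_START);	/* CLOSED -> STARTING, arms timer */
	cp_event(EV_UP);	/* STARTING -> OPENED, logs */
	cp_event(EV_DOWN);	/* OPENED -> CLOSED, logs, arms timer */
	return 0;
}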