Example no. 1
/*
 * Snoop SM MADs for port info and P_Key table sets, so we can
 * synthesize LID change and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev,
		      u8 port_num,
		      struct ib_mad *mad)
{
	struct ib_event event;

	if ((mad->mad_hdr.mgmt_class  == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class  == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method     == IB_MGMT_METHOD_SET) {
		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
			/* Offsets 58 and 76 into mad->data are the sm_lid and
			 * neighbormtu_mastersmsl fields of the PortInfo
			 * attribute carried in the SMP.
			 */
			update_sm_ah(to_mdev(ibdev), port_num,
				     be16_to_cpup((__be16 *) (mad->data + 58)),
				     (*(u8 *) (mad->data + 76)) & 0xf);

			event.device           = ibdev;
			event.event            = IB_EVENT_LID_CHANGE;
			event.element.port_num = port_num;
			ib_dispatch_event(&event);
		}

		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
			event.device           = ibdev;
			event.event            = IB_EVENT_PKEY_CHANGE;
			event.element.port_num = port_num;
			ib_dispatch_event(&event);
		}
	}
}
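All of the snippets in this collection ultimately call ib_dispatch_event(), which fans the event out to every consumer that has registered an ib_event_handler for the device. As a rough, non-authoritative sketch of the receiving side (the my_ulp_* names are hypothetical; only struct ib_event_handler, INIT_IB_EVENT_HANDLER() and ib_register_event_handler() are taken from the verbs core, and note that ib_register_event_handler() returned int in older kernels), a consumer might look like this:

#include <rdma/ib_verbs.h>

/* Hypothetical handler: react to the port events synthesized above. */
static void my_ulp_event_handler(struct ib_event_handler *handler,
				 struct ib_event *event)
{
	switch (event->event) {
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_PKEY_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
		/* e.g. invalidate cached address handles / paths */
		pr_info("my_ulp: port %u of %s changed, refreshing state\n",
			event->element.port_num,
			dev_name(&event->device->dev));
		break;
	default:
		break;
	}
}

static struct ib_event_handler my_ulp_handler;

/* Hypothetical registration helper, called once per ib_device. */
static void my_ulp_register_events(struct ib_device *ib_dev)
{
	INIT_IB_EVENT_HANDLER(&my_ulp_handler, ib_dev, my_ulp_event_handler);
	ib_register_event_handler(&my_ulp_handler);
}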
Example no. 2
static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
			  enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
	struct ib_event ibev;

	bool fatal = false;
	u8 port = 0;

	switch (event) {
	case MLX5_DEV_EVENT_SYS_ERROR:
		/* Device-wide fatal error: remember it and deactivate the
		 * device only after the event has been dispatched below,
		 * otherwise the ib_active check would swallow the event.
		 */
		fatal = true;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	case MLX5_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PORT_DOWN:
		ibev.event = IB_EVENT_PORT_ERR;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PORT_INITIALIZED:
		/* not used by ULPs */
		return;

	case MLX5_DEV_EVENT_LID_CHANGE:
		ibev.event = IB_EVENT_LID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PKEY_CHANGE:
		ibev.event = IB_EVENT_PKEY_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_GUID_CHANGE:
		ibev.event = IB_EVENT_GID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_CLIENT_REREG:
		ibev.event = IB_EVENT_CLIENT_REREGISTER;
		port = (u8)param;
		break;
	default:
		return;
	}

	ibev.device	      = &ibdev->ib_dev;
	ibev.element.port_num = port;

	/* DEVICE_FATAL carries port 0, so only validate the port number
	 * for port-scoped events.
	 */
	if (!fatal && (port < 1 || port > ibdev->num_ports)) {
		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
		return;
	}

	if (ibdev->ib_active)
		ib_dispatch_event(&ibev);

	if (fatal)
		ibdev->ib_active = false;
}
Example no. 3
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, int subtype,
			  int port)
{
	struct ib_event ibev;

	switch (event) {
	case MLX4_EVENT_TYPE_PORT_CHANGE:
		ibev.event = subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ?
			IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
		break;

	case MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR:
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = port;

	ib_dispatch_event(&ibev);
}
Example no. 4
static void handle_catas(struct mthca_dev *dev)
{
	struct ib_event event;
	const char *type;
	int i;

	event.device = &dev->ib_dev;
	event.event  = IB_EVENT_DEVICE_FATAL;
	event.element.port_num = 0;

	ib_dispatch_event(&event);

	/* The error type is encoded in the top byte of the byte-swapped
	 * first word of the catastrophic error buffer.
	 */
	switch (swab32(readl(dev->catas_err.map)) >> 24) {
	case MTHCA_CATAS_TYPE_INTERNAL:
		type = "internal error";
		break;
	case MTHCA_CATAS_TYPE_UPLINK:
		type = "uplink bus error";
		break;
	case MTHCA_CATAS_TYPE_DDR:
		type = "DDR data error";
		break;
	case MTHCA_CATAS_TYPE_PARITY:
		type = "internal parity error";
		break;
	default:
		type = "unknown error";
		break;
	}

	mthca_err(dev, "Catastrophic error detected: %s\n", type);
	for (i = 0; i < dev->catas_err.size; ++i)
		mthca_err(dev, "  buf[%02x]: %08x\n",
			  i, swab32(readl(dev->catas_err.map + i)));
}
Example no. 5
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	struct ib_event event;

	event.device		= ib_dev;
	event.element.port_num	= port;
	event.event		= IB_EVENT_GID_CHANGE;

	ib_dispatch_event(&event);
}
Example no. 6
static int ocrdma_dispatch_port_error(struct ocrdma_dev *dev)
{
	struct ib_event err_event;

	err_event.event = IB_EVENT_PORT_ERR;
	err_event.element.port_num = 1;
	err_event.device = &dev->ibdev;
	ib_dispatch_event(&err_event);
	return 0;
}
Example no. 7
static int ocrdma_dispatch_port_active(struct ocrdma_dev *dev)
{
	struct ib_event port_event;

	port_event.event = IB_EVENT_PORT_ACTIVE;
	port_event.element.port_num = 1;
	port_event.device = &dev->ibdev;
	ib_dispatch_event(&port_event);
	return 0;
}
Example no. 8
static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
{
	struct ib_event event;
	struct qib_devdata *dd = ppd->dd;

	event.device = &dd->verbs_dev.ibdev;
	event.element.port_num = ppd->port;
	event.event = ev;
	ib_dispatch_event(&event);
}
Example no. 9
/*
 * Snoop SM MADs for port info and P_Key table sets, so we can
 * synthesize LID change and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev,
		      u8 port_num,
		      struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_event event;

	if ((mad->mad_hdr.mgmt_class  == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class  == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method     == IB_MGMT_METHOD_SET) {
		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
			struct ib_port_info *pinfo =
				(struct ib_port_info *) ((struct ib_smp *) mad)->data;
			u16 lid = be16_to_cpu(pinfo->lid);

			mthca_update_rate(to_mdev(ibdev), port_num);
			update_sm_ah(to_mdev(ibdev), port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			event.device           = ibdev;
			event.element.port_num = port_num;

			/* The top bit of clientrereg_resv_subnetto is the
			 * ClientReregister flag.
			 */
			if (pinfo->clientrereg_resv_subnetto & 0x80) {
				event.event    = IB_EVENT_CLIENT_REREGISTER;
				ib_dispatch_event(&event);
			}

			if (prev_lid != lid) {
				event.event    = IB_EVENT_LID_CHANGE;
				ib_dispatch_event(&event);
			}
		}

		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
			event.device           = ibdev;
			event.event            = IB_EVENT_PKEY_CHANGE;
			event.element.port_num = port_num;
			ib_dispatch_event(&event);
		}
	}
}
Example no. 10
static void dispatch_port_event(struct ehca_shca *shca, int port_num,
				enum ib_event_type type, const char *msg)
{
	struct ib_event event;

	ehca_info(&shca->ib_device, "port %d %s.", port_num, msg);
	event.device = &shca->ib_device;
	event.event = type;
	event.element.port_num = port_num;
	ib_dispatch_event(&event);
}
Example no. 11
static void rxe_port_event(struct rxe_dev *rxe,
			   enum ib_event_type event)
{
	struct ib_event ev;

	ev.device = &rxe->ib_dev;
	ev.element.port_num = 1;
	ev.event = event;

	ib_dispatch_event(&ev);
}
Example no. 12
static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
				   enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}
Example no. 13
static int write_gid(struct ib_device *ib_dev, u8 port,
		     struct ib_gid_table *table, int ix,
		     const union ib_gid *gid,
		     const struct ib_gid_attr *attr,
		     enum gid_table_write_action action,
		     bool  default_gid)
{
	int ret = 0;
	struct net_device *old_net_dev;
	unsigned long flags;

	/* For devices where rdma_cap_roce_gid_table() is true, this function
	 * should be protected by a sleepable lock.
	 */
	write_lock_irqsave(&table->data_vec[ix].lock, flags);

	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
		write_unlock_irqrestore(&table->data_vec[ix].lock, flags);
		/* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
		 * RoCE providers and thus only updates the cache.
		 */
		if (action == GID_TABLE_WRITE_ACTION_ADD)
			ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
					      &table->data_vec[ix].context);
		else if (action == GID_TABLE_WRITE_ACTION_DEL)
			ret = ib_dev->del_gid(ib_dev, port, ix,
					      &table->data_vec[ix].context);
		write_lock_irqsave(&table->data_vec[ix].lock, flags);
	}

	old_net_dev = table->data_vec[ix].attr.ndev;
	if (old_net_dev && old_net_dev != attr->ndev)
		dev_put(old_net_dev);
	/* if modify_gid failed, just delete the old gid */
	if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
		gid = &zgid;
		attr = &zattr;
		table->data_vec[ix].context = NULL;
	}
	if (default_gid)
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
	if (table->data_vec[ix].attr.ndev &&
	    table->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(table->data_vec[ix].attr.ndev);

	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

	write_unlock_irqrestore(&table->data_vec[ix].lock, flags);

	if (!ret && rdma_cap_roce_gid_table(ib_dev, port)) {
		struct ib_event event;

		event.device		= ib_dev;
		event.element.port_num	= port;
		event.event		= IB_EVENT_GID_CHANGE;

		ib_dispatch_event(&event);
	}
	return ret;
}
Example no. 14
static int write_gid(struct ib_device *ib_dev, u8 port,
		     struct ib_roce_gid_cache *cache, int ix,
		     const union ib_gid *gid,
		     const struct ib_gid_attr *attr)
{
	unsigned int orig_seq;
	int ret;
	struct dev_put_rcu	*put_rcu;
	struct net_device *old_net_dev;

	orig_seq = cache->data_vec[ix].seq;
	cache->data_vec[ix].seq = -1;
	/* Ensure that all readers will see invalid sequence
	 * identifier before starting the actual GID update.
	 */
	smp_wmb();

	ret = ib_dev->modify_gid(ib_dev, port, ix, gid, attr,
				 &cache->data_vec[ix].context);

	if (memcmp(gid, &zgid, sizeof(*gid)) &&
	    (ret == -EADDRNOTAVAIL))
		goto out;
	old_net_dev = cache->data_vec[ix].attr.ndev;
	if (old_net_dev && old_net_dev != attr->ndev) {
		put_rcu = kmalloc(sizeof(*put_rcu), GFP_KERNEL);
		if (put_rcu) {
			put_rcu->ndev = old_net_dev;
			call_rcu(&put_rcu->rcu, put_ndev);
		} else {
			pr_warn("roce_gid_cache: can't allocate rcu context, using synchronize\n");
			synchronize_rcu();
			dev_put(old_net_dev);
		}
	}
	/* if modify_gid failed, just delete the old gid */
	if (ret || !memcmp(gid, &zgid, sizeof(*gid))) {
		gid = &zgid;
		attr = &zattr;
		cache->data_vec[ix].context = NULL;
	}
	memcpy(&cache->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&cache->data_vec[ix].attr, attr, sizeof(*attr));
	if (cache->data_vec[ix].attr.ndev &&
	    cache->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(cache->data_vec[ix].attr.ndev);

	/* Ensure that all cached gid data updating is finished before
	 * marking the entry as available.
	 */
	smp_wmb();

out:
	if (++orig_seq == (unsigned int)-1)
		orig_seq = 0;
	ACCESS_ONCE(cache->data_vec[ix].seq) = orig_seq;

	if (!ret) {
		struct ib_event event;

		event.device		= ib_dev;
		event.element.port_num	= port;
		event.event		= IB_EVENT_GID_CHANGE;

		ib_dispatch_event(&event);
	}

	return (ret == -EADDRNOTAVAIL) ? 0 : ret;
}
Example no. 15
static void parse_ec(struct ehca_shca *shca, u64 eqe)
{
	struct ib_event event;
	u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);

	switch (ec) {
	case 0x30: /* port availability change */
		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
			ehca_info(&shca->ib_device,
				  "port %x is active.", port);
			event.device = &shca->ib_device;
			event.event = IB_EVENT_PORT_ACTIVE;
			event.element.port_num = port;
			shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
			ib_dispatch_event(&event);
		} else {
			ehca_info(&shca->ib_device,
				  "port %x is inactive.", port);
			event.device = &shca->ib_device;
			event.event = IB_EVENT_PORT_ERR;
			event.element.port_num = port;
			shca->sport[port - 1].port_state = IB_PORT_DOWN;
			ib_dispatch_event(&event);
		}
		break;
	case 0x31:
		/* port configuration change
		 * disruptive change is caused by
		 * LID, PKEY or SM change
		 */
		ehca_warn(&shca->ib_device,
			  "disruptive port %x configuration change", port);

		ehca_info(&shca->ib_device,
			  "port %x is inactive.", port);
		event.device = &shca->ib_device;
		event.event = IB_EVENT_PORT_ERR;
		event.element.port_num = port;
		shca->sport[port - 1].port_state = IB_PORT_DOWN;
		ib_dispatch_event(&event);

		ehca_info(&shca->ib_device,
			  "port %x is active.", port);
		event.device = &shca->ib_device;
		event.event = IB_EVENT_PORT_ACTIVE;
		event.element.port_num = port;
		shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
		ib_dispatch_event(&event);
		break;
	case 0x32: /* adapter malfunction */
		ehca_err(&shca->ib_device, "Adapter malfunction.");
		break;
	case 0x33: /* trace stopped */
		ehca_err(&shca->ib_device, "Trace stopped.");
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
			 ec, shca->ib_device.name);
		break;
	}
}