Example #1
/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to :	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}
Example #2
static inline int
nvif_notify_put_(struct nvif_notify *notify)
{
	struct nvif_object *object = notify->object;
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_ntfy_put_v0 ntfy;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_NTFY_PUT,
		.ntfy.index = notify->index,
	};

	if (atomic_inc_return(&notify->putcnt) != 1)
		return 0;

	return nvif_object_ioctl(object, &args, sizeof(args), NULL);
}

int
nvif_notify_put(struct nvif_notify *notify)
{
	if (likely(notify->object) &&
	    test_and_clear_bit(NVIF_NOTIFY_USER, &notify->flags)) {
		int ret = nvif_notify_put_(notify);
		if (test_bit(NVIF_NOTIFY_WORK, &notify->flags))
			flush_work(&notify->work);
		return ret;
	}
	return 0;
}

static inline int
nvif_notify_get_(struct nvif_notify *notify)
{
	struct nvif_object *object = notify->object;
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_ntfy_get_v0 ntfy;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_NTFY_GET,
		.ntfy.index = notify->index,
	};

	if (atomic_dec_return(&notify->putcnt) != 0)
		return 0;

	return nvif_object_ioctl(object, &args, sizeof(args), NULL);
}

int
nvif_notify_get(struct nvif_notify *notify)
{
	if (likely(notify->object) &&
	    !test_and_set_bit(NVIF_NOTIFY_USER, &notify->flags))
		return nvif_notify_get_(notify);
	return 0;
}

static inline int
nvif_notify_func(struct nvif_notify *notify, bool keep)
{
	int ret = notify->func(notify);
	if (ret == NVIF_NOTIFY_KEEP ||
	    !test_and_clear_bit(NVIF_NOTIFY_USER, &notify->flags)) {
		if (!keep)
			atomic_dec(&notify->putcnt);
		else
			nvif_notify_get_(notify);
	}
	return ret;
}

static void
nvif_notify_work(struct work_struct *work)
{
	struct nvif_notify *notify = container_of(work, typeof(*notify), work);
	nvif_notify_func(notify, true);
}

int
nvif_notify(const void *header, u32 length, const void *data, u32 size)
{
	struct nvif_notify *notify = NULL;
	const union {
		struct nvif_notify_rep_v0 v0;
	} *args = header;
	int ret = NVIF_NOTIFY_DROP;

	if (length == sizeof(args->v0) && args->v0.version == 0) {
		if (WARN_ON(args->v0.route))
			return NVIF_NOTIFY_DROP;
		notify = (void *)(unsigned long)args->v0.token;
	}

	if (!WARN_ON(notify == NULL)) {
		struct nvif_client *client = nvif_client(notify->object);
		if (!WARN_ON(notify->size != size)) {
			atomic_inc(&notify->putcnt);
			if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) {
				memcpy((void *)notify->data, data, size);
				schedule_work(&notify->work);
				return NVIF_NOTIFY_DROP;
			}
			notify->data = data;
			ret = nvif_notify_func(notify, client->driver->keep);
			notify->data = NULL;
		}
	}

	return ret;
}

int
nvif_notify_fini(struct nvif_notify *notify)
{
	struct nvif_object *object = notify->object;
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_ntfy_del_v0 ntfy;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_NTFY_DEL,
		.ntfy.index = notify->index,
	};
	int ret = nvif_notify_put(notify);
	if (ret >= 0 && object) {
		ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
		if (ret == 0) {
			nvif_object_ref(NULL, &notify->object);
			kfree((void *)notify->data);
		}
	}
	return ret;
}

int
nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *),
		 int (*func)(struct nvif_notify *), bool work, u8 event,
		 void *data, u32 size, u32 reply, struct nvif_notify *notify)
{
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_ntfy_new_v0 ntfy;
		struct nvif_notify_req_v0 req;
	} *args;
	int ret = -ENOMEM;

	notify->object = NULL;
	nvif_object_ref(object, &notify->object);
	notify->flags = 0;
	atomic_set(&notify->putcnt, 1);
	notify->dtor = dtor;
	notify->func = func;
	notify->data = NULL;
	notify->size = reply;
	if (work) {
		INIT_WORK(&notify->work, nvif_notify_work);
		set_bit(NVIF_NOTIFY_WORK, &notify->flags);
		notify->data = kmalloc(notify->size, GFP_KERNEL);
		if (!notify->data)
			goto done;
	}

	if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
		goto done;
	args->ioctl.version = 0;
	args->ioctl.type = NVIF_IOCTL_V0_NTFY_NEW;
	args->ntfy.version = 0;
	args->ntfy.event = event;
	args->req.version = 0;
	args->req.reply = notify->size;
	args->req.route = 0;
	args->req.token = (unsigned long)(void *)notify;

	memcpy(args->req.data, data, size);
	ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
	notify->index = args->ntfy.index;
	kfree(args);
done:
	if (ret)
		nvif_notify_fini(notify);
	return ret;
}

static void
nvif_notify_del(struct nvif_notify *notify)
{
	nvif_notify_fini(notify);
	kfree(notify);
}

void
nvif_notify_ref(struct nvif_notify *notify, struct nvif_notify **pnotify)
{
	/* Taking a new reference is not supported: notify must be NULL. */
	BUG_ON(notify != NULL);
	if (*pnotify)
		(*pnotify)->dtor(*pnotify);
	*pnotify = notify;
}
Example #3
/* The default argument belongs in the class declaration; repeating
 * "= DEFAULT" in this out-of-class definition is ill-formed C++. */
namemap_t::namemap_t(int _allocated){
	allocated = _allocated;
	size = 0;
	refs = (typeof(refs)) malloc (allocated * sizeof(*refs));
	ids = (typeof(ids)) malloc (allocated * sizeof(*ids));
}
Example #4
/**
 * Send put command to hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct drm_vmw_control_stream_arg *arg,
				bool interruptible)
{
	struct vmw_escape_video_flush *flush;
	size_t fifo_size;
	bool have_so = dev_priv->sou_priv ? true : false;
	int i, num_items;
	SVGAGuestPtr ptr;

	struct {
		struct vmw_escape_header escape;
		struct {
			uint32_t cmdType;
			uint32_t streamId;
		} header;
	} *cmds;
	struct {
		uint32_t registerId;
		uint32_t value;
	} *items;

	/* the SVGA_VIDEO_* defines are indices; we need the highest one + 1 */
	if (have_so)
		num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
	else
		num_items = SVGA_VIDEO_PITCH_3 + 1;

	fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;

	cmds = vmw_fifo_reserve(dev_priv, fifo_size);
	/* hardware has hung, can't do anything here */
	if (!cmds)
		return -ENOMEM;

	items = (typeof(items))&cmds[1];
	flush = (struct vmw_escape_video_flush *)&items[num_items];

	/* the size is header + number of items */
	fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));

	cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->header.streamId = arg->stream_id;

	/* the IDs are neatly numbered */
	for (i = 0; i < num_items; i++)
		items[i].registerId = i;

	vmw_bo_get_guest_ptr(&buf->base, &ptr);
	ptr.offset += arg->offset;

	items[SVGA_VIDEO_ENABLED].value     = true;
	items[SVGA_VIDEO_FLAGS].value       = arg->flags;
	items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
	items[SVGA_VIDEO_FORMAT].value      = arg->format;
	items[SVGA_VIDEO_COLORKEY].value    = arg->color_key;
	items[SVGA_VIDEO_SIZE].value        = arg->size;
	items[SVGA_VIDEO_WIDTH].value       = arg->width;
	items[SVGA_VIDEO_HEIGHT].value      = arg->height;
	items[SVGA_VIDEO_SRC_X].value       = arg->src.x;
	items[SVGA_VIDEO_SRC_Y].value       = arg->src.y;
	items[SVGA_VIDEO_SRC_WIDTH].value   = arg->src.w;
	items[SVGA_VIDEO_SRC_HEIGHT].value  = arg->src.h;
	items[SVGA_VIDEO_DST_X].value       = arg->dst.x;
	items[SVGA_VIDEO_DST_Y].value       = arg->dst.y;
	items[SVGA_VIDEO_DST_WIDTH].value   = arg->dst.w;
	items[SVGA_VIDEO_DST_HEIGHT].value  = arg->dst.h;
	items[SVGA_VIDEO_PITCH_1].value     = arg->pitch[0];
	items[SVGA_VIDEO_PITCH_2].value     = arg->pitch[1];
	items[SVGA_VIDEO_PITCH_3].value     = arg->pitch[2];
	if (have_so) {
		items[SVGA_VIDEO_DATA_GMRID].value    = ptr.gmrId;
		items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
	}

	fill_flush(flush, arg->stream_id);

	vmw_fifo_commit(dev_priv, fifo_size);

	return 0;
}
Example #5
__private_extern__
int pp_filter_register()
{
    struct sflt_filter pp_filter = {
        0, // sf_handle
        SFLT_GLOBAL, // sf_flags
        0, // sf_name
        ppfilter_unregister, // sf_unregistered
        ppfilter_attach, // sf_attach
        ppfilter_detach, // sf_detach
        NULL, // sf_notify
        NULL, // sf_getpeername
        NULL, // sf_getsockname
        NULL, // sf_data_in
        NULL, // sf_data_out
        ppfilter_connect_in, // sf_connect_in
        ppfilter_connect_out, // sf_connect_out
        NULL, // sf_bind
        NULL, // sf_setoption
        NULL, // sf_getoption
        NULL, // sf_listen
        NULL, // sf_ioctl
    };

    int i;
    for(i=0; i < PP_DYN_ENTRIES_COUNT; ++i)
        pp_dyn_entries[i].addr = INADDR_NONE;

    pp_dynlck = lck_spin_alloc_init(pp_spin_grp, LCK_ATTR_NULL);
    if (!pp_dynlck)
        return (ENOMEM);

    errno_t err;

    pp_filter.sf_handle = PP_FILTER_TCP_HANDLE;
    // data filter is used for PASV FTP support
    pp_filter.sf_data_in = ppfilter_data_in;
    pp_filter.sf_name = "PeerGuardian TCP";
    if ((err = sflt_register(&pp_filter, AF_INET, SOCK_STREAM, IPPROTO_TCP))) {
        printf("PeerGuardian: Failed to register '%s' filter: %d.\n", pp_filter.sf_name, err);
        return (err);
    }

    pp_filter.sf_handle = PP_FILTER_TCP6_HANDLE;
    pp_filter.sf_name = "PeerGuardian TCP6";
    if ((err = sflt_register(&pp_filter, AF_INET6, SOCK_STREAM, IPPROTO_TCP))) {
        printf("PeerGuardian: Failed to register '%s' filter: %d.\n", pp_filter.sf_name, err);
        pp_filter_deregister_handle(PP_FILTER_TCP_HANDLE, &tcpFiltDone);
        goto filter_register_exit;
    }
    pp_filter.sf_data_in = (typeof(pp_filter.sf_data_in))NULL;

    // UDP can "connect", but it can also just send the data, so we need to monitor both
    pp_filter.sf_data_in = ppfilter_data_in_raw;
    pp_filter.sf_data_out = ppfilter_data_out_raw;

    pp_filter.sf_handle = PP_FILTER_UDP_HANDLE;
    pp_filter.sf_name = "PeerGuardian UDP";
    if ((err = sflt_register(&pp_filter, AF_INET, SOCK_DGRAM, IPPROTO_UDP))) {
        printf("PeerGuardian: Failed to register '%s' filter: %d.\n", pp_filter.sf_name, err);
        pp_filter_deregister_handle(PP_FILTER_TCP_HANDLE, &tcpFiltDone);
        pp_filter_deregister_handle(PP_FILTER_TCP6_HANDLE, &tcp6FiltDone);
        goto filter_register_exit;
    }

    pp_filter.sf_handle = PP_FILTER_UDP6_HANDLE;
    pp_filter.sf_name = "PeerGuardian UDP6";
    if ((err = sflt_register(&pp_filter, AF_INET6, SOCK_DGRAM, IPPROTO_UDP))) {
        printf("PeerGuardian: Failed to register '%s' filter: %d.\n", pp_filter.sf_name, err);
        pp_filter_deregister_handle(PP_FILTER_TCP_HANDLE, &tcpFiltDone);
        pp_filter_deregister_handle(PP_FILTER_TCP6_HANDLE, &tcp6FiltDone);
        pp_filter_deregister_handle(PP_FILTER_UDP_HANDLE, &udpFiltDone);
        goto filter_register_exit;
    }

    // RAW sockets don't "connect", they just send/recv data
    pp_filter.sf_connect_in = (typeof(pp_filter.sf_connect_in))NULL;
    pp_filter.sf_connect_out = (typeof(pp_filter.sf_connect_out))NULL;

    // Failures of the following are not fatal
    pp_filter.sf_handle = PP_FILTER_ICMP_HANDLE;
    pp_filter.sf_name = "PeerGuardian ICMP";
    if ((err = sflt_register(&pp_filter, AF_INET, SOCK_RAW, IPPROTO_ICMP))) {
        printf("PeerGuardian: Failed to register '%s' filter: %d.\n", pp_filter.sf_name, err);
        icmpFiltDone = -1;
    }

    pp_filter.sf_handle = PP_FILTER_ICMP6_HANDLE;
    pp_filter.sf_name = "PeerGuardian ICMP6";
    if ((err = sflt_register(&pp_filter, AF_INET6, SOCK_RAW, IPPROTO_ICMPV6))) {
        printf("PeerGuardian: Failed to register '%s' filter: %d.\n", pp_filter.sf_name, err);
        icmp6FiltDone = -1;
    }

    pp_filter.sf_handle = PP_FILTER_RAW_HANDLE;
    pp_filter.sf_name = "PeerGuardian RAW";
    if ((err = sflt_register(&pp_filter, AF_INET, SOCK_RAW, IPPROTO_RAW))) {
        printf("PeerGuardian: Failed to register '%s' filter: %d.\n", pp_filter.sf_name, err);
        rawFiltDone = -1;
    }

    err = 0;

filter_register_exit:
    if (err && pp_dynlck)
        lck_spin_free(pp_dynlck, pp_spin_grp);

    return (err);
}
Example #6
/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}
Example #7
static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned char **data, int dataoff,
			 Setup_UUIE *setup)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret;
	int i;
	__be16 port;
	union nf_inet_addr addr;
	typeof(set_h225_addr_hook) set_h225_addr;

	pr_debug("nf_ct_q931: Setup\n");

	if (setup->options & eSetup_UUIE_h245Address) {
		ret = expect_h245(skb, ct, ctinfo, data, dataoff,
				  &setup->h245Address);
		if (ret < 0)
			return -1;
	}

	set_h225_addr = rcu_dereference(set_h225_addr_hook);
	if ((setup->options & eSetup_UUIE_destCallSignalAddress) &&
	    (set_h225_addr) && ct->status & IPS_NAT_MASK &&
	    get_h225_addr(ct, *data, &setup->destCallSignalAddress,
			  &addr, &port) &&
	    memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) {
		pr_debug("nf_ct_q931: set destCallSignalAddress %pI6:%hu->%pI6:%hu\n",
			 &addr, ntohs(port), &ct->tuplehash[!dir].tuple.src.u3,
			 ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port));
		ret = set_h225_addr(skb, data, dataoff,
				    &setup->destCallSignalAddress,
				    &ct->tuplehash[!dir].tuple.src.u3,
				    ct->tuplehash[!dir].tuple.src.u.tcp.port);
		if (ret < 0)
			return -1;
	}

	if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) &&
	    (set_h225_addr) && ct->status & IPS_NAT_MASK &&
	    get_h225_addr(ct, *data, &setup->sourceCallSignalAddress,
			  &addr, &port) &&
	    memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) {
		pr_debug("nf_ct_q931: set sourceCallSignalAddress %pI6:%hu->%pI6:%hu\n",
			 &addr, ntohs(port), &ct->tuplehash[!dir].tuple.dst.u3,
			 ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port));
		ret = set_h225_addr(skb, data, dataoff,
				    &setup->sourceCallSignalAddress,
				    &ct->tuplehash[!dir].tuple.dst.u3,
				    ct->tuplehash[!dir].tuple.dst.u.tcp.port);
		if (ret < 0)
			return -1;
	}

	if (setup->options & eSetup_UUIE_fastStart) {
		for (i = 0; i < setup->fastStart.count; i++) {
			ret = process_olc(skb, ct, ctinfo, data, dataoff,
					  &setup->fastStart.item[i]);
			if (ret < 0)
				return -1;
		}
	}

	return 0;
}
Example #8
#include <stdio.h>

int main(void) {
	int a = 5;
	typeof(a) b = a;	/* typeof(a) names a's type; it is not a value */
	printf("%i\n", b);
	return 0;
}
Example #9
/*
 * Replace or place a tile.
 */
widp
wid_game_map_client_replace_tile (widp w, 
                                  double x, double y, 
                                  thingp t,
                                  tpp tp)
{
    tree_rootp thing_tiles;
    const char *tilename;
    tilep tile;
    widp child;

    verify(w);

    /*
     * Grow tl and br to fit the template thing. Use the first tile.
     */
    if (!tp) {
        tp = thing_tp(t);
        if (!tp) {
            ERR("no thing template to replace on client");
            return (0);
        }
    }

    if ((x < 0) || (y < 0) || (x >= MAP_WIDTH) || (y >= MAP_WIDTH)) {
        LOG("client: thing template [%s] cannot be placed at %f %f",
            tp_short_name(tp), x, y);
        return (0);
    }

    thing_tiles = tp_get_tiles(tp);
    if (!thing_tiles) {
        ERR("thing template [%s] has no tiles", tp_short_name(tp));
        return (0);
    }

    thing_tilep thing_tile;

    /*
     * Get a random tile to start with.
     */
    thing_tile = (typeof(thing_tile)) thing_tile_random(thing_tiles);

    /*
     * Find the real tile that corresponds to this name.
     */
    tilename = thing_tile_name(thing_tile);
    tile = tile_find(tilename);

    if (!tile) {
        ERR("tile name %s from thing %s not found on client",
            tilename,
            tp_short_name(tp));
        return (0);
    }

    /*
     * Make a new thing.
     */
    child = wid_new_square_button(wid_game_map_client_grid_container,
                                  "client map tile");

    wid_set_mode(child, WID_MODE_NORMAL);
    wid_set_no_shape(child);

    /*
     * "paint" the thing.
     */
    wid_game_map_client_set_thing_template(child, tp);

    if (!t) {
        t = thing_client_local_new(tp);
    }

    wid_set_thing(child, t);
    wid_set_tile(child, tile);

    double dx = 0;
    double dy = 0;

    /*
     * Does it appear as a different size on screen?
     */
    double scale = tp_get_scale(tp);

    /*
     * So we have baby and bigger slimes. But alas this is visual only and has 
     * no effect on hp on the server yet.
     */
    if (thing_is_variable_size(t)) {
        scale += gaussrand(0.0, 0.05);
    }

    if (scale != 1.0) {
        wid_scaling_blit_to_pct_in(child, scale, scale, 500, 9999999);
    }

    if (thing_is_cloud_effect(t)) {
        /*
         * The epicenter needs to be where it was on the server as we do a 
         * flood fill to see where the rest of the explosion goes.
         */
        if (!t->is_epicenter) {
            dx = gaussrand(0.0, 0.5);
            dy = gaussrand(0.0, 0.5);
        }

        wid_fade_out(child, 1000);
    }

    thing_client_wid_update(t, x + dx, y + dy, false /* smooth */,
                            true /* is new */);

    /*
     * Offset tall things
     */
    if (scale != 1.0) {
        if (thing_is_blit_y_offset(t)) {
            wid_set_blit_y_offset(child, 
                                  wid_get_height(child) * scale * -((scale - 1.0) / 2.0));
        }
    }

    /*
     * If this is a pre-existing thing perhaps being recreated on a new level
     * then it will have a direction already. Update it.
     */
    if (thing_is_animated(t)) {
        thing_animate(t);
    }

    /*
     * This adds it to the grid wid.
     */
#ifdef DEBUG_CLIENT_THING
    wid_update(child);
    char name[20];
    sprintf(name, "%d",t->thing_id);
    wid_set_text(child,name);
#endif

    /*
     * We've been told about the epicenter of an explosion, now emulate the
     * blast.
     */
    if (t->is_epicenter && thing_is_cloud_effect(t)) {

        if ((tp->id == THING_EXPLOSION1)        ||
            (tp->id == THING_EXPLOSION2)        ||
            (tp->id == THING_EXPLOSION3)        ||
            (tp->id == THING_EXPLOSION4)        ||
            (tp->id == THING_SMALL_EXPLOSION1)  ||
            (tp->id == THING_SMALL_EXPLOSION2)  ||
            (tp->id == THING_SMALL_EXPLOSION3)  ||
            (tp->id == THING_SMALL_EXPLOSION4)  ||
            (tp->id == THING_MED_EXPLOSION1)    ||
            (tp->id == THING_MED_EXPLOSION2)    ||
            (tp->id == THING_MED_EXPLOSION3)    ||
            (tp->id == THING_MED_EXPLOSION4)    ||
            (tp->id == THING_FIREBURST1)        ||
            (tp->id == THING_FIREBURST2)        ||
            (tp->id == THING_FIREBURST3)        ||
            (tp->id == THING_FIREBURST4)        ||
            (tp->id == THING_BOMB)              ||
            (tp->id == THING_POISON1)           ||
            (tp->id == THING_POISON2)           ||
            (tp->id == THING_CLOUDKILL1)        ||
            (tp->id == THING_CLOUDKILL2)) {

            level_place_explosion(client_level,
                                  0, /* owner */
                                  tp,
                                  t->x, t->y,
                                  t->x, t->y);
        } else {
            ERR("unknown explosion %s", thing_logname(t));
        }
    }

    const char *sound = tp_sound_on_creation(tp);
    if (sound) {
        if (thing_is_cloud_effect(t)) {
            if (t->is_epicenter) {
                sound_play_at(sound, t->x, t->y);
            }
        } else {
            sound_play_at(sound, t->x, t->y);
        }
    }

    return (child);
}
Example #10
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
	unsigned uninitialized_var(in), log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};

	struct virtio_net_hdr_mrg_rxbuf hdr = {
		.hdr.flags = 0,
		.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};

	size_t total_len = 0;
	int err, headcount, mergeable;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;

	struct socket *sock = rcu_dereference(vq->private_data);

	if (!sock)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);
	vhost_hlen = vq->vhost_hlen;
	sock_hlen = vq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);

	while ((sock_len = peek_head_len(sock->sk))) {
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
					&in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			break;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			break;
		}
		/* We don't need to be notified again. */
		if (unlikely((vhost_hlen)))
			/* Skip header. TODO: support TSO. */
			move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in);
		else
			/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
			 * needed because sendmsg can modify msg_iov. */
			copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in);
		msg.msg_iovlen = in;
		err = sock->ops->recvmsg(NULL, sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		if (unlikely(vhost_hlen) &&
		    memcpy_toiovecend(vq->hdr, (unsigned char *)&hdr, 0,
				      vhost_hlen)) {
			vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
			       vq->iov->iov_base);
			break;
		}
		/* TODO: Should check and handle checksum. */
		if (likely(mergeable) &&
		    memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
				      offsetof(typeof(hdr), num_buffers),
				      sizeof hdr.num_buffers)) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			break;
		}
		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
					    headcount);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len);
		total_len += vhost_len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	int r;

	if (!n)
		return -ENOMEM;

	dev = &n->dev;
	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
	if (r < 0) {
		kfree(n);
		return r;
	}

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
	n->tx_poll_state = VHOST_NET_POLL_DISABLED;

	f->private_data = n;

	return 0;
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	if (!vq->private_data)
		return;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		tx_poll_stop(n);
		n->tx_poll_state = VHOST_NET_POLL_DISABLED;
	} else
		vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
}

static void vhost_net_enable_vq(struct vhost_net *n,
				struct vhost_virtqueue *vq)
{
	struct socket *sock = vq->private_data;
	if (!sock)
		return;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		n->tx_poll_state = VHOST_NET_POLL_STOPPED;
		tx_poll_start(n, sock);
	} else
		vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	vhost_net_disable_vq(n, vq);
	rcu_assign_pointer(vq->private_data, NULL);
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
	*rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->dev.vqs[index].poll);
}
Example #11
static int help(struct sk_buff *skb, unsigned int protoff,
		struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	unsigned int dataoff;
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct tcphdr _tcph;
	char *data, *data_limit, *ib_ptr;
	int dir = CTINFO2DIR(ctinfo);
	struct nf_conntrack_expect *exp;
	struct nf_conntrack_tuple *tuple;
	u_int32_t dcc_ip;
	u_int16_t dcc_port;
	__be16 port;
	int i, ret = NF_ACCEPT;
	char *addr_beg_p, *addr_end_p;
	typeof(nf_nat_irc_hook) nf_nat_irc;

	/* If packet is coming from IRC server */
	if (dir == IP_CT_DIR_REPLY)
		return NF_ACCEPT;

	/* Until there's been traffic both ways, don't look in packets. */
	if (ctinfo != IP_CT_ESTABLISHED &&
	    ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
		return NF_ACCEPT;

	/* Not a full tcp header? */
	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
	if (th == NULL)
		return NF_ACCEPT;

	/* No data? */
	dataoff = protoff + th->doff*4;
	if (dataoff >= skb->len)
		return NF_ACCEPT;

	spin_lock_bh(&irc_buffer_lock);
	ib_ptr = skb_header_pointer(skb, dataoff, skb->len - dataoff,
				    irc_buffer);
	BUG_ON(ib_ptr == NULL);

	data = ib_ptr;
	data_limit = ib_ptr + skb->len - dataoff;

	/* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
	 * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
	while (data < data_limit - (19 + MINMATCHLEN)) {
		if (memcmp(data, "\1DCC ", 5)) {
			data++;
			continue;
		}
		data += 5;
		/* we have at least (19+MINMATCHLEN)-5 bytes valid data left */

		iph = ip_hdr(skb);
		pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
			 &iph->saddr, ntohs(th->source),
			 &iph->daddr, ntohs(th->dest));

		for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
			if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) {
				/* no match */
				continue;
			}
			data += strlen(dccprotos[i]);
			pr_debug("DCC %s detected\n", dccprotos[i]);

			/* we have at least
			 * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
			 * data left (== 14/13 bytes) */
			if (parse_dcc((char *)data, data_limit, &dcc_ip,
				       &dcc_port, &addr_beg_p, &addr_end_p)) {
				pr_debug("unable to parse dcc command\n");
				continue;
			}
			pr_debug("DCC bound ip/port: %pI4:%u\n",
				&dcc_ip, dcc_port);

			/* dcc_ip can be the internal OR external (NAT'ed) IP */
			tuple = &ct->tuplehash[dir].tuple;
			if (tuple->src.u3.ip != htonl(dcc_ip) &&
			    tuple->dst.u3.ip != htonl(dcc_ip)) {
				if (net_ratelimit())
					printk(KERN_WARNING
						"Forged DCC command from %pI4: %pI4:%u\n",
						&tuple->src.u3.ip,
						&dcc_ip, dcc_port);
				continue;
			}

			exp = nf_conntrack_expect_alloc(ct);
			if (exp == NULL) {
				ret = NF_DROP;
				goto out;
			}
			tuple = &ct->tuplehash[!dir].tuple;
			port = htons(dcc_port);
			nf_conntrack_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
						 tuple->src.l3num,
						 NULL, &tuple->dst.u3,
						 IPPROTO_TCP, NULL, &port);

			nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
			if (nf_nat_irc && ct->status & IPS_NAT_MASK)
				ret = nf_nat_irc(skb, ctinfo,
						 addr_beg_p - ib_ptr,
						 addr_end_p - addr_beg_p,
						 exp);
			else if (nf_conntrack_expect_related(exp) != 0)
				ret = NF_DROP;
			nf_conntrack_expect_put(exp);
			goto out;
		}
	}
 out:
	spin_unlock_bh(&irc_buffer_lock);
	return ret;
}
Example #12
static void update_domain_cpuid_info(struct domain *d,
                                     const xen_domctl_cpuid_t *ctl)
{
    switch ( ctl->input[0] )
    {
    case 0: {
        union {
            typeof(boot_cpu_data.x86_vendor_id) str;
            struct {
                uint32_t ebx, edx, ecx;
            } reg;
        } vendor_id = {
            .reg = {
                .ebx = ctl->ebx,
                .edx = ctl->edx,
                .ecx = ctl->ecx
            }
        };
        int old_vendor = d->arch.x86_vendor;

        d->arch.x86_vendor = get_cpu_vendor(vendor_id.str, gcv_guest);

        if ( is_hvm_domain(d) && (d->arch.x86_vendor != old_vendor) )
        {
            struct vcpu *v;

            for_each_vcpu( d, v )
                hvm_update_guest_vendor(v);
        }

        break;
    }

    case 1:
        d->arch.x86 = (ctl->eax >> 8) & 0xf;
        if ( d->arch.x86 == 0xf )
            d->arch.x86 += (ctl->eax >> 20) & 0xff;
        d->arch.x86_model = (ctl->eax >> 4) & 0xf;
        if ( d->arch.x86 >= 0x6 )
            d->arch.x86_model |= (ctl->eax >> 12) & 0xf0;

        if ( is_pv_domain(d) && ((levelling_caps & LCAP_1cd) == LCAP_1cd) )
        {
            uint64_t mask = cpuidmask_defaults._1cd;
            uint32_t ecx = ctl->ecx & pv_featureset[FEATURESET_1c];
            uint32_t edx = ctl->edx & pv_featureset[FEATURESET_1d];

            /*
             * Must expose the host's HTT and X2APIC values so a guest using native
             * CPUID can correctly interpret other leaves which cannot be
             * masked.
             */
            if ( cpu_has_x2apic )
                ecx |= cpufeat_mask(X86_FEATURE_X2APIC);
            if ( cpu_has_htt )
                edx |= cpufeat_mask(X86_FEATURE_HTT);

            switch ( boot_cpu_data.x86_vendor )
            {
            case X86_VENDOR_INTEL:
                /*
                 * Intel masking MSRs are documented as AND masks.
                 * Experimentally, they are applied after OSXSAVE and APIC
                 * are fast-forwarded from real hardware state.
                 */
                mask &= ((uint64_t)edx << 32) | ecx;

                if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
                    ecx = cpufeat_mask(X86_FEATURE_OSXSAVE);
                else
                    ecx = 0;
                edx = cpufeat_mask(X86_FEATURE_APIC);

                mask |= ((uint64_t)edx << 32) | ecx;
                break;

            case X86_VENDOR_AMD:
                mask &= ((uint64_t)ecx << 32) | edx;

                /*
                 * AMD masking MSRs are documented as overrides.
                 * Experimentally, fast-forwarding of the OSXSAVE and APIC
                 * bits from real hardware state only occurs if the MSR has
                 * the respective bits set.
                 */
                if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
                    ecx = cpufeat_mask(X86_FEATURE_OSXSAVE);
                else
                    ecx = 0;
                edx = cpufeat_mask(X86_FEATURE_APIC);

                mask |= ((uint64_t)ecx << 32) | edx;
                break;
            }

            d->arch.pv_domain.cpuidmasks->_1cd = mask;
        }
        break;

    case 6:
        if ( is_pv_domain(d) && ((levelling_caps & LCAP_6c) == LCAP_6c) )
        {
            uint64_t mask = cpuidmask_defaults._6c;

            if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
                mask &= (~0ULL << 32) | ctl->ecx;

            d->arch.pv_domain.cpuidmasks->_6c = mask;
        }
        break;

    case 7:
        if ( ctl->input[1] != 0 )
            break;

        if ( is_pv_domain(d) && ((levelling_caps & LCAP_7ab0) == LCAP_7ab0) )
        {
            uint64_t mask = cpuidmask_defaults._7ab0;
            uint32_t eax = ctl->eax;
            uint32_t ebx = ctl->ebx & pv_featureset[FEATURESET_7b0];

            if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
                mask &= ((uint64_t)eax << 32) | ebx;

            d->arch.pv_domain.cpuidmasks->_7ab0 = mask;
        }
        break;

    case 0xd:
        if ( ctl->input[1] != 1 )
            break;

        if ( is_pv_domain(d) && ((levelling_caps & LCAP_Da1) == LCAP_Da1) )
        {
            uint64_t mask = cpuidmask_defaults.Da1;
            uint32_t eax = ctl->eax & pv_featureset[FEATURESET_Da1];

            if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
                mask &= (~0ULL << 32) | eax;

            d->arch.pv_domain.cpuidmasks->Da1 = mask;
        }
        break;

    case 0x80000001:
        if ( is_pv_domain(d) && ((levelling_caps & LCAP_e1cd) == LCAP_e1cd) )
        {
            uint64_t mask = cpuidmask_defaults.e1cd;
            uint32_t ecx = ctl->ecx & pv_featureset[FEATURESET_e1c];
            uint32_t edx = ctl->edx & pv_featureset[FEATURESET_e1d];

            /*
             * Must expose the host's CMP_LEGACY value so a guest using native
             * CPUID can correctly interpret other leaves which cannot be
             * masked.
             */
            if ( cpu_has_cmp_legacy )
                ecx |= cpufeat_mask(X86_FEATURE_CMP_LEGACY);

            /* If not emulating AMD, clear the duplicated features in e1d. */
            if ( d->arch.x86_vendor != X86_VENDOR_AMD )
                edx &= ~CPUID_COMMON_1D_FEATURES;

            switch ( boot_cpu_data.x86_vendor )
            {
            case X86_VENDOR_INTEL:
                mask &= ((uint64_t)edx << 32) | ecx;
                break;

            case X86_VENDOR_AMD:
                mask &= ((uint64_t)ecx << 32) | edx;

                /*
                 * Fast-forward bits - Must be set in the masking MSR for
                 * fast-forwarding to occur in hardware.
                 */
                ecx = 0;
                edx = cpufeat_mask(X86_FEATURE_APIC);

                mask |= ((uint64_t)ecx << 32) | edx;
                break;
            }

            d->arch.pv_domain.cpuidmasks->e1cd = mask;
        }
        break;
    }
}
Example #13
/**
 * @brief South Bridge CIMx configuration
 *
 * Should be called before executing any CIMx function.
 * This function is called in both romstage and ramstage.
 */
void sb900_cimx_config(AMDSBCFG *sb_config)
{
	if (!sb_config) {
		printk(BIOS_INFO, "SB900 - Cfg.c - sb900_cimx_config - No sb_config.\n");
		return;
	}
	printk(BIOS_INFO, "SB900 - Cfg.c - sb900_cimx_config - Start.\n");
	memset(sb_config, 0, sizeof(AMDSBCFG));

	/* static Build Parameters */
	sb_config->BuildParameters.BiosSize				= BIOS_SIZE;
	sb_config->BuildParameters.LegacyFree			= LEGACY_FREE;
	sb_config->BuildParameters.WatchDogTimerBase	= WATCHDOG_TIMER_BASE_ADDRESS;	// Board Level
	sb_config->BuildParameters.AcpiGpe0BlkAddr		= GPE0_BLK_ADDRESS;				// Board Level
	sb_config->BuildParameters.CpuControlBlkAddr	= CPU_CNT_BLK_ADDRESS;			// Board Level
	sb_config->BuildParameters.AcpiPmTmrBlkAddr		= PM1_TMR_BLK_ADDRESS;			// Board Level
	sb_config->BuildParameters.AcpiPm1CntBlkAddr	= PM1_CNT_BLK_ADDRESS;			// Board Level
	sb_config->BuildParameters.AcpiPm1EvtBlkAddr	= PM1_EVT_BLK_ADDRESS;			// Board Level
	sb_config->BuildParameters.SioPmeBaseAddress	= SIO_PME_BASE_ADDRESS;			// Board Level
	sb_config->BuildParameters.SpiRomBaseAddress	= SPI_BASE_ADDRESS;				// Board Level
	sb_config->BuildParameters.Smbus0BaseAddress	= SMBUS0_BASE_ADDRESS;			// Board Level
	sb_config->BuildParameters.Smbus1BaseAddress	= SMBUS1_BASE_ADDRESS;			// Board Level

	/* Turn on CDROM and HDD Power */
	sb_config->SATAMODE.SataMode.SataClkMode		= SATA_CLK_RESERVED;

	// header
	sb_config->StdHeader.PcieBasePtr				= PCIEX_BASE_ADDRESS;

	// Build Parameters
	sb_config->BuildParameters.ImcEnableOverWrite	= IMC_ENABLE_OVER_WRITE;		// Internal Option
	sb_config->BuildParameters.UsbMsi				= USB_MSI;						// Internal Option
	sb_config->BuildParameters.HdAudioMsi			= HDAUDIO_MSI;					// Internal Option
	sb_config->BuildParameters.LpcMsi				= LPC_MSI;						// Internal Option
	sb_config->BuildParameters.PcibMsi				= PCIB_MSI;						// Internal Option
	sb_config->BuildParameters.AbMsi				= AB_MSI;						// Internal Option
	sb_config->BuildParameters.GecShadowRomBase		= GEC_SHADOWROM_BASE;			// Board Level
	sb_config->BuildParameters.HpetBase				= HPET_BASE_ADDRESS;			// Board Level
	sb_config->BuildParameters.SataIDESsid			= SATA_IDE_MODE_SSID;			// Board Level
	sb_config->BuildParameters.SataRAIDSsid			= SATA_RAID_MODE_SSID;			// Board Level
	sb_config->BuildParameters.SataRAID5Ssid		= SATA_RAID5_MODE_SSID;			// Board Level
	sb_config->BuildParameters.SataAHCISsid			= SATA_AHCI_SSID;				// Board Level
	sb_config->BuildParameters.OhciSsid				= OHCI_SSID;					// Board Level
	sb_config->BuildParameters.EhciSsid				= EHCI_SSID;					// Board Level
	sb_config->BuildParameters.Ohci4Ssid			= OHCI4_SSID;					// Board Level
	sb_config->BuildParameters.SmbusSsid			= SMBUS_SSID;					// Board Level
	sb_config->BuildParameters.IdeSsid				= IDE_SSID;						// Board Level
	sb_config->BuildParameters.AzaliaSsid			= AZALIA_SSID;					// Board Level
	sb_config->BuildParameters.LpcSsid				= LPC_SSID;						// Board Level
	// sb_config->BuildParameters.PCIBSsid				= PCIB_SSID;					// Field Retired

	//
	// Common Function
	//
	sb_config->SATAMODE.SataMode.SataController		= SATA_CONTROLLER;				// External Option
	sb_config->SATAMODE.SataMode.SataIdeCombMdPriSecOpt	= SATA_IDE_COMBMD_PRISEC_OPT;	// External Option
	sb_config->SATAMODE.SataMode.SataIdeCombinedMode	= SATA_IDECOMBINED_MODE;	// External Option
	sb_config->S3Resume								= 0;							// CIMx Internal Used
	sb_config->SpreadSpectrum						= INCHIP_SPREAD_SPECTRUM;		// Board Level
	sb_config->NbSbGen2								= INCHIP_NB_SB_GEN2;			// External Option
	sb_config->GppGen2								= INCHIP_GPP_GEN2;				// External Option
	sb_config->GppMemWrImprove						= INCHIP_GPP_MEMORY_WRITE_IMPROVE;	// Internal Option
	sb_config->S4Resume								= 0;							// CIMx Internal Used
	sb_config->SataClass							= CONFIG_SATA_CONTROLLER_MODE;	// INCHIP_SATA_MODE	// External Option
	sb_config->SataIdeMode							= INCHIP_IDE_MODE;				// External Option
	sb_config->sdConfig								= SB_SD_CONFIG;					// External Option
	sb_config->sdSpeed								= SB_SD_SPEED;					// Internal Option
	sb_config->sdBitwidth							= SB_SD_BITWIDTH;				// Internal Option
	sb_config->SataDisUnusedIdePChannel				= SATA_DISUNUSED_IDE_P_CHANNEL;	// External Option
	sb_config->SataDisUnusedIdeSChannel				= SATA_DISUNUSED_IDE_S_CHANNEL;	// External Option
	sb_config->IdeDisUnusedIdePChannel				= IDE_DISUNUSED_IDE_P_CHANNEL;	// External Option
	sb_config->IdeDisUnusedIdeSChannel				= IDE_DISUNUSED_IDE_S_CHANNEL;	// External Option
	sb_config->SATAESPPORT.SataEspPort.PORT0		= SATA_ESP_PORT0;				// Board Level
	sb_config->SATAESPPORT.SataEspPort.PORT1		= SATA_ESP_PORT1;				// Board Level
	sb_config->SATAESPPORT.SataEspPort.PORT2		= SATA_ESP_PORT2;				// Board Level
	sb_config->SATAESPPORT.SataEspPort.PORT3		= SATA_ESP_PORT3;				// Board Level
	sb_config->SATAESPPORT.SataEspPort.PORT4		= SATA_ESP_PORT4;				// Board Level
	sb_config->SATAESPPORT.SataEspPort.PORT5		= SATA_ESP_PORT5;				// Board Level
	sb_config->SATAESPPORT.SataEspPort.PORT6		= SATA_ESP_PORT6;				// Board Level
	sb_config->SATAESPPORT.SataEspPort.PORT7		= SATA_ESP_PORT7;				// Board Level
	sb_config->SATAPORTPOWER.SataPortPower.PORT0	= SATA_PORT_POWER_PORT0;		// Board Level
	sb_config->SATAPORTPOWER.SataPortPower.PORT1	= SATA_PORT_POWER_PORT1;		// Board Level
	sb_config->SATAPORTPOWER.SataPortPower.PORT2	= SATA_PORT_POWER_PORT2;		// Board Level
	sb_config->SATAPORTPOWER.SataPortPower.PORT3	= SATA_PORT_POWER_PORT3;		// Board Level
	sb_config->SATAPORTPOWER.SataPortPower.PORT4	= SATA_PORT_POWER_PORT4;		// Board Level
	sb_config->SATAPORTPOWER.SataPortPower.PORT5	= SATA_PORT_POWER_PORT5;		// Board Level
	sb_config->SATAPORTPOWER.SataPortPower.PORT6	= SATA_PORT_POWER_PORT6;		// Board Level
	sb_config->SATAPORTPOWER.SataPortPower.PORT7	= SATA_PORT_POWER_PORT7;		// Board Level
	sb_config->SATAPORTMODE.SataPortMd.PORT0		= SATA_PORTMODE_PORT0;			// Board Level
	sb_config->SATAPORTMODE.SataPortMd.PORT1		= SATA_PORTMODE_PORT1;			// Board Level
	sb_config->SATAPORTMODE.SataPortMd.PORT2		= SATA_PORTMODE_PORT2;			// Board Level
	sb_config->SATAPORTMODE.SataPortMd.PORT3		= SATA_PORTMODE_PORT3;			// Board Level
	sb_config->SATAPORTMODE.SataPortMd.PORT4		= SATA_PORTMODE_PORT4;			// Board Level
	sb_config->SATAPORTMODE.SataPortMd.PORT5		= SATA_PORTMODE_PORT5;			// Board Level
	sb_config->SATAPORTMODE.SataPortMd.PORT6		= SATA_PORTMODE_PORT6;			// Board Level
	sb_config->SATAPORTMODE.SataPortMd.PORT7		= SATA_PORTMODE_PORT7;			// Board Level
	sb_config->SataAggrLinkPmCap					= INCHIP_SATA_AGGR_LINK_PM_CAP;	// Internal Option
	sb_config->SataPortMultCap						= INCHIP_SATA_PORT_MULT_CAP;	// Internal Option
	sb_config->SataClkAutoOff						= INCHIP_SATA_CLK_AUTO_OFF;		// External Option
	sb_config->SataPscCap							= INCHIP_SATA_PSC_CAP;			// External Option
	sb_config->SataFisBasedSwitching				= INCHIP_SATA_FIS_BASE_SW;		// External Option
	sb_config->SataCccSupport						= INCHIP_SATA_CCC_SUPPORT;		// External Option
	sb_config->SataSscCap							= INCHIP_SATA_SSC_CAP;			// External Option
	sb_config->SataMsiCapability					= INCHIP_SATA_MSI_CAP;			// Internal Option
	sb_config->SataForceRaid						= INCHIP_SATA_FORCE_RAID5;		// Internal Option
	sb_config->SataTargetSupport8Device				= CIMXSB_SATA_TARGET_8DEVICE_CAP;	// External Option
	sb_config->SataDisableGenericMode				= SATA_DISABLE_GENERIC_MODE_CAP;// External Option
	sb_config->SataAhciEnclosureManagement			= SATA_AHCI_ENCLOSURE_CAP;		// Internal Option
	sb_config->SataSgpio0							= SATA_GPIO_0_CAP;				// External Option
	sb_config->SataSgpio1							= SATA_GPIO_1_CAP;				// External Option
	sb_config->SataPhyPllShutDown					= SATA_PHY_PLL_SHUTDOWN;		// External Option
	sb_config->SATAHOTREMOVALENH.SataHotRemoveEnhPort.PORT0	= SATA_HOTREMOVEL_ENH_PORT0;	// Board Level
	sb_config->SATAHOTREMOVALENH.SataHotRemoveEnhPort.PORT1	= SATA_HOTREMOVEL_ENH_PORT1;	// Board Level
	sb_config->SATAHOTREMOVALENH.SataHotRemoveEnhPort.PORT2	= SATA_HOTREMOVEL_ENH_PORT2;	// Board Level
	sb_config->SATAHOTREMOVALENH.SataHotRemoveEnhPort.PORT3	= SATA_HOTREMOVEL_ENH_PORT3;	// Board Level
	sb_config->SATAHOTREMOVALENH.SataHotRemoveEnhPort.PORT4	= SATA_HOTREMOVEL_ENH_PORT4;	// Board Level
	sb_config->SATAHOTREMOVALENH.SataHotRemoveEnhPort.PORT5	= SATA_HOTREMOVEL_ENH_PORT5;	// Board Level
	sb_config->SATAHOTREMOVALENH.SataHotRemoveEnhPort.PORT6	= SATA_HOTREMOVEL_ENH_PORT6;	// Board Level
	sb_config->SATAHOTREMOVALENH.SataHotRemoveEnhPort.PORT7	= SATA_HOTREMOVEL_ENH_PORT7;	// Board Level
	// USB
	sb_config->USBMODE.UsbMode.Ohci1				= INCHIP_USB_OHCI1_CINFIG;		// External Option
	sb_config->USBMODE.UsbMode.Ehci1				= INCHIP_USB_EHCI1_CINFIG;		// Internal Option*
	sb_config->USBMODE.UsbMode.Ohci2				= INCHIP_USB_OHCI2_CINFIG;		// External Option
	sb_config->USBMODE.UsbMode.Ehci2				= INCHIP_USB_EHCI2_CINFIG;		// Internal Option*
	sb_config->USBMODE.UsbMode.Ohci3				= INCHIP_USB_OHCI3_CINFIG;		// External Option
	sb_config->USBMODE.UsbMode.Ehci3				= INCHIP_USB_EHCI3_CINFIG;		// Internal Option*
	sb_config->USBMODE.UsbMode.Ohci4				= INCHIP_USB_OHCI4_CINFIG;		// External Option
	// GEC
	sb_config->GecConfig							= INCHIP_GEC_CONTROLLER;		// External Option
	sb_config->IrConfig								= SB_IR_CONTROLLER;				// External Option
	sb_config->XhciSwitch							= SB_XHCI_SWITCH;				// External Option
	// Azalia
	sb_config->AzaliaController						= INCHIP_AZALIA_CONTROLLER;		// External Option
	sb_config->AzaliaPinCfg							= INCHIP_AZALIA_PIN_CONFIG;		// Board Level
	sb_config->FrontPanelDetected					= INCHIP_FRONT_PANEL_DETECTED;	// Board Level
	sb_config->AZALIACONFIG.AzaliaSdinPin			= AZALIA_PIN_CONFIG;			// Board Level
	sb_config->AZOEMTBL.pAzaliaOemCodecTablePtr		= NULL;							// Board Level
	sb_config->AZOEMFPTBL.pAzaliaOemFpCodecTablePtr	= NULL;							// Board Level
	sb_config->AnyHT200MhzLink						= INCHIP_ANY_HT_200MHZ_LINK;	// Internal Option
	sb_config->HpetTimer							= SB_HPET_TIMER;				// External Option
	sb_config->AzaliaSnoop							= INCHIP_AZALIA_SNOOP;			// Internal Option*
	// Generic
	sb_config->NativePcieSupport					= INCHIP_NATIVE_PCIE_SUPPOORT;	// External Option
	// USB
	sb_config->UsbPhyPowerDown						= INCHIP_USB_PHY_POWER_DOWN;	// External Option
	sb_config->PcibClkStopOverride					= INCHIP_PCIB_CLK_STOP_OVERRIDE;// Internal Option
	// sb_config->HpetMsiDis							= 0;								// Field Retired
	// sb_config->ResetCpuOnSyncFlood					= 0;								// Field Retired
	// sb_config->PcibAutoClkCtr						= 0;								// Field Retired
	sb_config->OEMPROGTBL.OemProgrammingTablePtr	= (uintptr_t)NULL;							// Board Level
	sb_config->PORTCONFIG[0].PortCfg.PortPresent	= SB_GPP_PORT0;					// Board Level
	sb_config->PORTCONFIG[0].PortCfg.PortDetected	= 0;							// CIMx Internal Used
	sb_config->PORTCONFIG[0].PortCfg.PortIsGen2		= 0;							// CIMx Internal Used
	sb_config->PORTCONFIG[0].PortCfg.PortHotPlug	= 0;							// CIMx Internal Used
	// sb_config->PORTCONFIG[0].PortCfg.PortIntxMap		= 0;								// Field Retired
	sb_config->PORTCONFIG[1].PortCfg.PortPresent	= SB_GPP_PORT1;					// Board Level
	sb_config->PORTCONFIG[1].PortCfg.PortDetected	= 0;							// CIMx Internal Used
	sb_config->PORTCONFIG[1].PortCfg.PortIsGen2		= 0;							// CIMx Internal Used
	sb_config->PORTCONFIG[1].PortCfg.PortHotPlug	= 0;							// CIMx Internal Used
	// sb_config->PORTCONFIG[0].PortCfg.PortIntxMap		= 0;								// Field Retired
	sb_config->PORTCONFIG[2].PortCfg.PortPresent	= SB_GPP_PORT2;					// Board Level
	sb_config->PORTCONFIG[2].PortCfg.PortDetected	= 0;							// CIMx Internal Used
	sb_config->PORTCONFIG[2].PortCfg.PortIsGen2		= 0;							// CIMx Internal Used
	sb_config->PORTCONFIG[2].PortCfg.PortHotPlug	= 0;							// CIMx Internal Used
	// sb_config->PORTCONFIG[0].PortCfg.PortIntxMap		= 0;								// Field Retired
	sb_config->PORTCONFIG[3].PortCfg.PortPresent	= SB_GPP_PORT3;					// Board Level
	sb_config->PORTCONFIG[3].PortCfg.PortDetected	= 0;							// CIMx Internal Used
	sb_config->PORTCONFIG[3].PortCfg.PortIsGen2		= 0;							// CIMx Internal Used
	sb_config->PORTCONFIG[3].PortCfg.PortHotPlug	= 0;							// CIMx Internal Used
	// sb_config->PORTCONFIG[0].PortCfg.PortIntxMap		= 0;								// Field Retired
	sb_config->GppLinkConfig						= INCHIP_GPP_LINK_CONFIG;		// External Option
	sb_config->GppFoundGfxDev						= 0;							// CIMx Internal Used
	sb_config->GppFunctionEnable					= SB_GPP_CONTROLLER;			// External Option
	sb_config->GppUnhidePorts						= INCHIP_GPP_UNHIDE_PORTS;		// Internal Option
	sb_config->GppPortAspm							= INCHIP_GPP_PORT_ASPM;			// Internal Option
	sb_config->GppLaneReversal						= INCHIP_GPP_LANEREVERSAL;		// External Option
	sb_config->AlinkPhyPllPowerDown					= INCHIP_ALINK_PHY_PLL_POWER_DOWN;	// External Option
	sb_config->GppPhyPllPowerDown					= INCHIP_GPP_PHY_PLL_POWER_DOWN;// External Option
	sb_config->GppDynamicPowerSaving				= INCHIP_GPP_DYNAMIC_POWER_SAVING;	// External Option
	sb_config->PcieAER								= INCHIP_PCIE_AER;				// External Option
	sb_config->PcieRAS								= INCHIP_PCIE_RAS;				// External Option
	sb_config->GppHardwareDowngrade					= INCHIP_GPP_HARDWARE_DOWNGRADE;// Internal Option
	sb_config->GppToggleReset						= INCHIP_GPP_TOGGLE_RESET;		// External Option
	sb_config->sdbEnable							= 0;							// CIMx Internal Used
	sb_config->TempMMIO								= (typeof(sb_config->TempMMIO))NULL;							// CIMx Internal Used
	// sb_config->GecPhyStatus							= INCHIP_GEC_PHY_STATUS;		// Field Retired
	sb_config->SBGecPwr								= INCHIP_GEC_POWER_POLICY;		// Internal Option
	sb_config->SBGecDebugBus						= INCHIP_GEC_DEBUGBUS;			// Internal Option
	sb_config->SbPcieOrderRule						= INCHIP_SB_PCIE_ORDER_RULE;	// External Option
	sb_config->AcDcMsg								= INCHIP_ACDC_MSG;				// Internal Option
	sb_config->TimerTickTrack						= INCHIP_TIMER_TICK_TRACK;		// Internal Option
	sb_config->ClockInterruptTag					= INCHIP_CLOCK_INTERRUPT_TAG;	// Internal Option
	sb_config->OhciTrafficHanding					= INCHIP_OHCI_TRAFFIC_HANDING;	// Internal Option
	sb_config->EhciTrafficHanding					= INCHIP_EHCI_TRAFFIC_HANDING;	// Internal Option
	sb_config->FusionMsgCMultiCore					= INCHIP_FUSION_MSGC_MULTICORE;	// Internal Option
	sb_config->FusionMsgCStage						= INCHIP_FUSION_MSGC_STAGE;		// Internal Option
	sb_config->ALinkClkGateOff						= INCHIP_ALINK_CLK_GATE_OFF;	// External Option
	sb_config->BLinkClkGateOff						= INCHIP_BLINK_CLK_GATE_OFF;	// External Option
	// sb_config->sdb									= 0;								// Field Retired
	sb_config->GppGen2Strap							= 0;							// CIMx Internal Used
	sb_config->SlowSpeedABlinkClock					= INCHIP_SLOW_SPEED_ABLINK_CLOCK;	// Internal Option
	sb_config->DYNAMICGECROM.DynamicGecRomAddress_Ptr	= NULL;						// Board Level
	sb_config->AbClockGating						= INCHIP_AB_CLOCK_GATING;		// External Option
	sb_config->GppClockGating						= INCHIP_GPP_CLOCK_GATING;		// External Option
	sb_config->L1TimerOverwrite						= INCHIP_L1_TIMER_OVERWRITE;	// Internal Option
	// sb_config->UmiLinkWidth							= 0;								// Field Retired
	sb_config->UmiDynamicSpeedChange				= INCHIP_UMI_DYNAMIC_SPEED_CHANGE;	// Internal Option
	// sb_config->PcieRefClockOverclocking				= 0;								// Field Retired
	sb_config->SbAlinkGppTxDriverStrength			= INCHIP_ALINK_GPP_TX_DRV_STRENGTH;	// Internal Option
	sb_config->PwrFailShadow						= 0x02;							// Board Level
	sb_config->StressResetMode						= INCHIP_STRESS_RESET_MODE;		// Internal Option
	sb_config->hwm.fanSampleFreqDiv					= 0x03;							// Board Level
	sb_config->hwm.hwmSbtsiAutoPoll					= 1;							// Board Level

	/* General */
	sb_config->PciClks								= SB_PCI_CLOCK_RESERVED;
	sb_config->hwm.hwmEnable						= 0x0;

#ifndef __PRE_RAM__
	/* ramstage cimx config here */
	if (!sb_config->StdHeader.CALLBACK.CalloutPtr) {
		sb_config->StdHeader.CALLBACK.CalloutPtr = sb900_callout_entry;
	}

	//sb_config->
#endif //!__PRE_RAM__
    printk(BIOS_INFO, "SB900 - Cfg.c - sb900_cimx_config - End.\n");
}
Example #14
/**
 * @brief Register a RTDM device
 *
 * Registers a device in the RTDM namespace.
 *
 * @param[in] dev Device descriptor.
 *
 * @return 0 is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if the descriptor contains invalid
 * entries. RTDM_PROFILE_INFO() must appear in the list of
 * initializers for the driver properties.
 *
 * - -EEXIST is returned if the specified device name or protocol ID is
 * already in use.
 *
 * - -ENOMEM is returned if a memory allocation failed in the process
 * of registering the device.
 *
 * @coretags{secondary-only}
 */
int rtdm_dev_register(struct rtdm_device *dev)
{
	int ret, pos, major, minor;
	struct device *kdev = NULL;
	struct rtdm_driver *drv;
	xnkey_t id;
	dev_t rdev;

	secondary_mode_only();

	if (!realtime_core_enabled())
		return -ENOSYS;

	mutex_lock(&register_lock);

	dev->name = NULL;
	drv = dev->driver;
	pos = atomic_read(&drv->refcount);
	ret = register_driver(drv);
	if (ret) {
		mutex_unlock(&register_lock);
		return ret;
	}

	dev->ops = drv->ops;
	if (drv->device_flags & RTDM_NAMED_DEVICE)
		dev->ops.socket = (typeof(dev->ops.socket))enosys;
	else
		dev->ops.open = (typeof(dev->ops.open))enosys;

	init_waitqueue_head(&dev->putwq);
	dev->ops.close = __rtdm_dev_close; /* Interpose on driver's handler. */
	atomic_set(&dev->refcount, 0);

	if (drv->device_flags & RTDM_FIXED_MINOR) {
		minor = dev->minor;
		if (minor < 0 || minor >= drv->device_count) {
			ret = -EINVAL;
			goto fail;
		}
	} else
		dev->minor = minor = pos;

	if (drv->device_flags & RTDM_NAMED_DEVICE) {
		major = drv->named.major;
		dev->name = kasformat(dev->label, minor);
		if (dev->name == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		ret = xnregistry_enter(dev->name, dev,
				       &dev->named.handle, NULL);
		if (ret)
			goto fail;

		rdev = MKDEV(major, minor);
		kdev = device_create(rtdm_class, NULL, rdev,
				     dev, dev->label, minor);
		if (IS_ERR(kdev)) {
			xnregistry_remove(dev->named.handle);
			ret = PTR_ERR(kdev);
			goto fail;
		}
	} else {
		dev->name = kstrdup(dev->label, GFP_KERNEL);
		if (dev->name == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		rdev = MKDEV(0, 0);
		kdev = device_create(rtdm_class, NULL, rdev,
				     dev, dev->name);
		if (IS_ERR(kdev)) {
			ret = PTR_ERR(kdev);
			goto fail;
		}

		id = get_proto_id(drv->protocol_family, drv->socket_type);
		ret = xnid_enter(&protocol_devices, &dev->proto.id, id);
		if (ret < 0)
			goto fail;
	}

	dev->rdev = rdev;
	dev->kdev = kdev;
	dev->magic = RTDM_DEVICE_MAGIC;

	mutex_unlock(&register_lock);

	trace_cobalt_device_register(dev);

	return 0;
fail:
	if (kdev)
		device_destroy(rtdm_class, rdev);

	unregister_driver(drv);

	mutex_unlock(&register_lock);

	if (dev->name)
		kfree(dev->name);

	return ret;
}
Example #15
void EspeakTTSWorker::run() {

    //  int freq = cst_wave_sample_rate(w);
    int freq = espeak_Initialize(AUDIO_OUTPUT_RETRIEVAL, buflength, NULL, 0);
    //  espeakEVENT_SAMPLERATE (= 8) is internal use: sets the sample rate

    //  int numchannels = cst_wave_num_channels(w);  // cannot find an alternative in espeak
    int numchannels = 1;                             // mono setting

    //  int samplesize_bytes = sizeof(typeof(*(w->samples)));  // w->samples is short *
    int samplesize_bytes = sizeof(typeof(sounddata));
    int samplesize = samplesize_bytes * 8;           // size in bits

    //  short* buf = (short*)(cst_wave_samples(w));  // sounddata
    //  short* buf = (short*)(sounddata);
    short* buf = waves;

    //  int numsamples = cst_wave_num_samples(w);
    qDebug() << "Number of Samples:" << counter << endl;
    // numsamples is the number of entries in wav:
    // SynthCallback(short *wav, int numsamples, espeak_EVENT *events);

    // Qt format setup; the parameters now come from espeak
    m_format.setFrequency(freq);
    m_format.setChannels(numchannels);
    m_format.setSampleSize(samplesize);              // bits per sample

    // unchanged
    m_format.setCodec("audio/pcm");
    m_format.setByteOrder(QAudioFormat::LittleEndian);
    m_format.setSampleType(QAudioFormat::SignedInt);

    if (!info->isFormatSupported(m_format)) {
        std::cerr << "Default format not supported - trying to use nearest";
        m_format = info->nearestFormat(m_format);
    }

    QAudioOutput m_audioOutput(m_format, 0);
    // connect(m_audioOutput, SIGNAL(stateChanged(QAudio::State)), this, SLOT(finishedPlaying(QAudio::State)));

    int sizeinbytes = counter * samplesize_bytes;
    b.open(QIODevice::ReadWrite);   // b is a QBuffer
    b.write((char*)buf, sizeinbytes);
    b.seek(0);
    m_audioOutput.start(&b);

    // hold until the sound is done
    QEventLoop loop;
    QObject::connect(&m_audioOutput, SIGNAL(stateChanged(QAudio::State)), &loop, SLOT(quit()));
    do {
        loop.exec();
    } while (m_audioOutput.state() == QAudio::ActiveState);
}
Example #16
/* expect GRE connections (PNS->PAC and PAC->PNS direction) */
static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid)
{
	struct nf_conntrack_expect *exp_orig, *exp_reply;
	enum ip_conntrack_dir dir;
	int ret = 1;
	typeof(nf_nat_pptp_hook_exp_gre) nf_nat_pptp_exp_gre;

	exp_orig = nf_ct_expect_alloc(ct);
	if (exp_orig == NULL)
		goto out;

	exp_reply = nf_ct_expect_alloc(ct);
	if (exp_reply == NULL)
		goto out_put_orig;

	/* original direction, PNS->PAC */
	dir = IP_CT_DIR_ORIGINAL;
	nf_ct_expect_init(exp_orig, NF_CT_EXPECT_CLASS_DEFAULT,
			  nf_ct_l3num(ct),
			  &ct->tuplehash[dir].tuple.src.u3,
			  &ct->tuplehash[dir].tuple.dst.u3,
			  IPPROTO_GRE, &peer_callid, &callid);
	exp_orig->expectfn = pptp_expectfn;

	/* reply direction, PAC->PNS */
	dir = IP_CT_DIR_REPLY;
	nf_ct_expect_init(exp_reply, NF_CT_EXPECT_CLASS_DEFAULT,
			  nf_ct_l3num(ct),
			  &ct->tuplehash[dir].tuple.src.u3,
			  &ct->tuplehash[dir].tuple.dst.u3,
			  IPPROTO_GRE, &callid, &peer_callid);
	exp_reply->expectfn = pptp_expectfn;

	nf_nat_pptp_exp_gre = rcu_dereference(nf_nat_pptp_hook_exp_gre);
	if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK)
		nf_nat_pptp_exp_gre(exp_orig, exp_reply);
	if (nf_ct_expect_related(exp_orig) != 0)
		goto out_put_both;
	if (nf_ct_expect_related(exp_reply) != 0)
		goto out_unexpect_orig;

	/* Add GRE keymap entries */
	if (nf_ct_gre_keymap_add(ct, IP_CT_DIR_ORIGINAL, &exp_orig->tuple) != 0)
		goto out_unexpect_both;
	if (nf_ct_gre_keymap_add(ct, IP_CT_DIR_REPLY, &exp_reply->tuple) != 0) {
		nf_ct_gre_keymap_destroy(ct);
		goto out_unexpect_both;
	}
	ret = 0;

out_put_both:
	nf_ct_expect_put(exp_reply);
out_put_orig:
	nf_ct_expect_put(exp_orig);
out:
	return ret;

out_unexpect_both:
	nf_ct_unexpect_related(exp_reply);
out_unexpect_orig:
	nf_ct_unexpect_related(exp_orig);
	goto out_put_both;
}
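The typeof(nf_nat_pptp_hook_exp_gre) local above is the usual GNU C idiom for declaring a variable whose type stays in sync with an optional hook pointer that is loaded once before use. A minimal userspace sketch of the same idiom, with an illustrative hook name and no RCU:
#include <stdio.h>

static void (*frob_hook)(int) = NULL;   /* optional hook, may stay NULL */

static void frob(int value)
{
	typeof(frob_hook) hook = frob_hook; /* load once; type tracks the global */

	if (hook)
		hook(value);
	else
		printf("no hook installed, value=%d\n", value);
}

int main(void)
{
	frob(42);
	return 0;
}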
Exemple #17
0
/**
 * @brief	Create SEC shared descriptor
 * @param[in]	mode - whether the descriptor is for encryption or
 *		decryption
 * @param[in]	params - test parameters (a struct test_param pointer)
 * @return	Shared descriptor pointer on success, otherwise NULL
 */
static void *create_descriptor(bool mode, void *params)
{
	struct test_param *crypto_info = (struct test_param *)params;
	struct protocol_info *proto = crypto_info->proto;
	struct wifi_ref_vector_s *ref_test_vector = proto->proto_vector;
	struct sec_descriptor_t *prehdr_desc;
	struct alginfo cipher_info;
	uint32_t *shared_desc = NULL;
	unsigned shared_desc_len;
	int i;
	bool found = false;

	prehdr_desc = __dma_mem_memalign(L1_CACHE_BYTES,
					 sizeof(struct sec_descriptor_t));
	if (unlikely(!prehdr_desc)) {
		fprintf(stderr,
			"error: %s: dma_mem_memalign failed for preheader\n",
			__func__);
		return NULL;
	}

	/* Store the pointer to the descriptor for freeing later on */
	for (i = mode ? 0 : 1; i < proto->num_cpus * FQ_PER_CORE * 2; i += 2) {
		mutex_lock(&proto->desc_wlock);
		if (proto->descr[i].descr == NULL) {
			proto->descr[i].descr = (uint32_t *)prehdr_desc;
			proto->descr[i].mode = mode;
			found = true;
			mutex_unlock(&proto->desc_wlock);
			break;
		}
		mutex_unlock(&proto->desc_wlock);
	}

	if (!found) {
		pr_err("Could not store descriptor pointer %s\n", __func__);
		return NULL;
	}

	memset(prehdr_desc, 0, sizeof(struct sec_descriptor_t));
	shared_desc = (typeof(shared_desc))&prehdr_desc->descbuf;

	cipher_info.key = ref_test_vector->key;
	cipher_info.keylen = WIFI_KEY_SIZE;
	cipher_info.key_enc_flags = 0;

	if (ENCRYPT == mode)
		cnstr_shdsc_wifi_encap(shared_desc,
				       &shared_desc_len,
/*
* This is currently hardcoded. The application doesn't allow for
* proper retrieval of PS.
*/
				       0,
				       ref_test_vector->mac_hdr_len,
				       ref_test_vector->pn,
				       ref_test_vector->priority,
				       ref_test_vector->key_id,
				       &cipher_info);
	else
		cnstr_shdsc_wifi_decap(shared_desc,
				       &shared_desc_len,
/*
* This is currently hardcoded. The application doesn't allow for
* proper retrieval of PS.
*/
				       0,
				       ref_test_vector->mac_hdr_len,
				       ref_test_vector->pn,
				       ref_test_vector->priority,
				       &cipher_info);

	prehdr_desc->prehdr.hi.word = shared_desc_len & SEC_PREHDR_SDLEN_MASK;

	pr_debug("SEC %s shared descriptor:\n", proto->name);

	for (i = 0; i < shared_desc_len; i++)
		pr_debug("0x%x\n", *shared_desc++);

	return prehdr_desc;
}
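The loop above that claims an entry in proto->descr[] is a lock-per-iteration slot reservation: take the lock, test one slot, claim it while still holding the lock, release. A standalone sketch of the same pattern using pthreads (table size and names are illustrative):
#include <pthread.h>
#include <stdio.h>

#define NUM_SLOTS 8

static void *slots[NUM_SLOTS];
static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;

static int claim_slot(void *desc)
{
	int i;

	for (i = 0; i < NUM_SLOTS; i++) {
		pthread_mutex_lock(&slots_lock);
		if (slots[i] == NULL) {
			slots[i] = desc;  /* claim while holding the lock */
			pthread_mutex_unlock(&slots_lock);
			return i;
		}
		pthread_mutex_unlock(&slots_lock);
	}
	return -1;                        /* table full */
}

int main(void)
{
	int dummy;
	printf("claimed slot %d\n", claim_slot(&dummy));
	return 0;
}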
Exemple #18
0
static inline int
pptp_inbound_pkt(struct sk_buff *skb,
		 struct PptpControlHeader *ctlh,
		 union pptp_ctrl_union *pptpReq,
		 unsigned int reqlen,
		 struct nf_conn *ct,
		 enum ip_conntrack_info ctinfo)
{
	struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info;
	u_int16_t msg;
	__be16 cid = 0, pcid = 0;
	typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound;

	msg = ntohs(ctlh->messageType);
	pr_debug("inbound control message %s\n", pptp_msg_name[msg]);

	switch (msg) {
	case PPTP_START_SESSION_REPLY:
		/* server confirms new control session */
		if (info->sstate < PPTP_SESSION_REQUESTED)
			goto invalid;
		if (pptpReq->srep.resultCode == PPTP_START_OK)
			info->sstate = PPTP_SESSION_CONFIRMED;
		else
			info->sstate = PPTP_SESSION_ERROR;
		break;

	case PPTP_STOP_SESSION_REPLY:
		/* server confirms end of control session */
		if (info->sstate > PPTP_SESSION_STOPREQ)
			goto invalid;
		if (pptpReq->strep.resultCode == PPTP_STOP_OK)
			info->sstate = PPTP_SESSION_NONE;
		else
			info->sstate = PPTP_SESSION_ERROR;
		break;

	case PPTP_OUT_CALL_REPLY:
		/* server accepted call, we now expect GRE frames */
		if (info->sstate != PPTP_SESSION_CONFIRMED)
			goto invalid;
		if (info->cstate != PPTP_CALL_OUT_REQ &&
		    info->cstate != PPTP_CALL_OUT_CONF)
			goto invalid;

		cid = pptpReq->ocack.callID;
		pcid = pptpReq->ocack.peersCallID;
		if (info->pns_call_id != pcid)
			goto invalid;
		pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg],
			 ntohs(cid), ntohs(pcid));

		if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) {
			info->cstate = PPTP_CALL_OUT_CONF;
			info->pac_call_id = cid;
			exp_gre(ct, cid, pcid);
		} else
			info->cstate = PPTP_CALL_NONE;
		break;

	case PPTP_IN_CALL_REQUEST:
		/* server tells us about incoming call request */
		if (info->sstate != PPTP_SESSION_CONFIRMED)
			goto invalid;

		cid = pptpReq->icreq.callID;
		pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
		info->cstate = PPTP_CALL_IN_REQ;
		info->pac_call_id = cid;
		break;

	case PPTP_IN_CALL_CONNECT:
		/* server tells us about incoming call established */
		if (info->sstate != PPTP_SESSION_CONFIRMED)
			goto invalid;
		if (info->cstate != PPTP_CALL_IN_REP &&
		    info->cstate != PPTP_CALL_IN_CONF)
			goto invalid;

		pcid = pptpReq->iccon.peersCallID;
		cid = info->pac_call_id;

		if (info->pns_call_id != pcid)
			goto invalid;

		pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid));
		info->cstate = PPTP_CALL_IN_CONF;

		/* we expect a GRE connection from PAC to PNS */
		exp_gre(ct, cid, pcid);
		break;

	case PPTP_CALL_DISCONNECT_NOTIFY:
		/* server confirms disconnect */
		cid = pptpReq->disc.callID;
		pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
		info->cstate = PPTP_CALL_NONE;

		/* untrack this call id, unexpect GRE packets */
		pptp_destroy_siblings(ct);
		break;

	case PPTP_WAN_ERROR_NOTIFY:
	case PPTP_ECHO_REQUEST:
	case PPTP_ECHO_REPLY:
		/* I don't have to explain these ;) */
		break;

	default:
		goto invalid;
	}

	nf_nat_pptp_inbound = rcu_dereference(nf_nat_pptp_hook_inbound);
	if (nf_nat_pptp_inbound && ct->status & IPS_NAT_MASK)
		return nf_nat_pptp_inbound(skb, ct, ctinfo, ctlh, pptpReq);
	return NF_ACCEPT;

invalid:
	pr_debug("invalid %s: type=%d cid=%u pcid=%u "
		 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
		 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
		 msg, ntohs(cid), ntohs(pcid),  info->cstate, info->sstate,
		 ntohs(info->pns_call_id), ntohs(info->pac_call_id));
	return NF_ACCEPT;
}
Exemple #19
0
static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned char **data, int dataoff,
			   H245_TransportAddress *taddr)
{
	int dir = CTINFO2DIR(ctinfo);
	int ret = 0;
	__be16 port;
	__be16 rtp_port, rtcp_port;
	union nf_inet_addr addr;
	struct nf_conntrack_expect *rtp_exp;
	struct nf_conntrack_expect *rtcp_exp;
	typeof(nat_rtp_rtcp_hook) nat_rtp_rtcp;

	/* Read RTP or RTCP address */
	if (!get_h245_addr(ct, *data, taddr, &addr, &port) ||
	    memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) ||
	    port == 0)
		return 0;

	/* RTP port is even */
	port &= htons(~1);
	rtp_port = port;
	rtcp_port = htons(ntohs(port) + 1);

	/* Create expect for RTP */
	if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
		return -1;
	nf_ct_expect_init(rtp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3,
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  IPPROTO_UDP, NULL, &rtp_port);

	/* Create expect for RTCP */
	if ((rtcp_exp = nf_ct_expect_alloc(ct)) == NULL) {
		nf_ct_expect_put(rtp_exp);
		return -1;
	}
	nf_ct_expect_init(rtcp_exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
			  &ct->tuplehash[!dir].tuple.src.u3,
			  &ct->tuplehash[!dir].tuple.dst.u3,
			  IPPROTO_UDP, NULL, &rtcp_port);

	if (memcmp(&ct->tuplehash[dir].tuple.src.u3,
		   &ct->tuplehash[!dir].tuple.dst.u3,
		   sizeof(ct->tuplehash[dir].tuple.src.u3)) &&
		   (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook)) &&
		   ct->status & IPS_NAT_MASK) {
		/* NAT needed */
		ret = nat_rtp_rtcp(skb, ct, ctinfo, data, dataoff,
				   taddr, port, rtp_port, rtp_exp, rtcp_exp);
	} else {		/* Conntrack only */
		if (nf_ct_expect_related(rtp_exp) == 0) {
			if (nf_ct_expect_related(rtcp_exp) == 0) {
				pr_debug("nf_ct_h323: expect RTP ");
				nf_ct_dump_tuple(&rtp_exp->tuple);
				pr_debug("nf_ct_h323: expect RTCP ");
				nf_ct_dump_tuple(&rtcp_exp->tuple);
			} else {
				nf_ct_unexpect_related(rtp_exp);
				ret = -1;
			}
		} else
			ret = -1;
	}

	nf_ct_expect_put(rtp_exp);
	nf_ct_expect_put(rtcp_exp);

	return ret;
}
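The port arithmetic above relies on RTP using an even port and RTCP the next odd one; masking with htons(~1) clears the right bit directly on the network-order value, since the low-order bit of the port number sits in the last byte either way. A standalone sketch of the same pairing:
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint16_t port = htons(5005);            /* odd port from signalling */
	uint16_t rtp  = port & htons(~1);       /* clear low bit: even RTP port */
	uint16_t rtcp = htons(ntohs(rtp) + 1);  /* RTCP is RTP + 1 */

	printf("rtp=%u rtcp=%u\n", ntohs(rtp), ntohs(rtcp));
	return 0;
}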
Exemple #20
0
static inline int
pptp_outbound_pkt(struct sk_buff *skb,
		  struct PptpControlHeader *ctlh,
		  union pptp_ctrl_union *pptpReq,
		  unsigned int reqlen,
		  struct nf_conn *ct,
		  enum ip_conntrack_info ctinfo)
{
	struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info;
	u_int16_t msg;
	__be16 cid = 0, pcid = 0;
	typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound;

	msg = ntohs(ctlh->messageType);
	pr_debug("outbound control message %s\n", pptp_msg_name[msg]);

	switch (msg) {
	case PPTP_START_SESSION_REQUEST:
		/* client requests for new control session */
		if (info->sstate != PPTP_SESSION_NONE)
			goto invalid;
		info->sstate = PPTP_SESSION_REQUESTED;
		break;

	case PPTP_STOP_SESSION_REQUEST:
		/* client requests end of control session */
		info->sstate = PPTP_SESSION_STOPREQ;
		break;

	case PPTP_OUT_CALL_REQUEST:
		/* client initiating connection to server */
		if (info->sstate != PPTP_SESSION_CONFIRMED)
			goto invalid;
		info->cstate = PPTP_CALL_OUT_REQ;
		/* track PNS call id */
		cid = pptpReq->ocreq.callID;
		pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
		info->pns_call_id = cid;
		break;

	case PPTP_IN_CALL_REPLY:
		/* client answers incoming call */
		if (info->cstate != PPTP_CALL_IN_REQ &&
		    info->cstate != PPTP_CALL_IN_REP)
			goto invalid;

		cid = pptpReq->icack.callID;
		pcid = pptpReq->icack.peersCallID;
		if (info->pac_call_id != pcid)
			goto invalid;
		pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg],
			 ntohs(cid), ntohs(pcid));

		if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) {
			/* part two of the three-way handshake */
			info->cstate = PPTP_CALL_IN_REP;
			info->pns_call_id = cid;
		} else
			info->cstate = PPTP_CALL_NONE;
		break;

	case PPTP_CALL_CLEAR_REQUEST:
		/* client requests hangup of call */
		if (info->sstate != PPTP_SESSION_CONFIRMED)
			goto invalid;
		/* FUTURE: iterate over all calls and check if
		 * call ID is valid.  We don't do this without newnat,
		 * because we only know about last call */
		info->cstate = PPTP_CALL_CLEAR_REQ;
		break;

	case PPTP_SET_LINK_INFO:
	case PPTP_ECHO_REQUEST:
	case PPTP_ECHO_REPLY:
		/* I don't have to explain these ;) */
		break;

	default:
		goto invalid;
	}

	nf_nat_pptp_outbound = rcu_dereference(nf_nat_pptp_hook_outbound);
	if (nf_nat_pptp_outbound && ct->status & IPS_NAT_MASK)
		return nf_nat_pptp_outbound(skb, ct, ctinfo, ctlh, pptpReq);
	return NF_ACCEPT;

invalid:
	pr_debug("invalid %s: type=%d cid=%u pcid=%u "
		 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
		 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
		 msg, ntohs(cid), ntohs(pcid),  info->cstate, info->sstate,
		 ntohs(info->pns_call_id), ntohs(info->pac_call_id));
	return NF_ACCEPT;
}
Exemple #21
0
static void Apply(Type::List &types)
{
    types.emplace_back( typeof( First ) );

    TypeUnpacker<Types...>::Apply( types );
}
Exemple #22
0
static uword
dhcp6_pd_reply_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
			vlib_frame_t * f)
{
  /* These cross the longjmp boundary (vlib_process_wait_for_event)
   * and need to be volatile - to prevent them from being optimized into
   * a register - which could change during suspension */

  while (1)
    {
      vlib_process_wait_for_event (vm);
      uword event_type = DHCP6_PD_DP_REPLY_REPORT;
      void *event_data = vlib_process_get_event_data (vm, &event_type);

      int i;
      if (event_type == DHCP6_PD_DP_REPLY_REPORT)
	{
	  prefix_report_t *events = event_data;
	  for (i = 0; i < vec_len (events); i++)
	    {
	      u32 event_size =
		sizeof (vl_api_dhcp6_pd_reply_event_t) +
		vec_len (events[i].prefixes) *
		sizeof (vl_api_dhcp6_pd_prefix_info_t);
	      vl_api_dhcp6_pd_reply_event_t *event =
		clib_mem_alloc (event_size);
	      clib_memset (event, 0, event_size);

	      event->sw_if_index = htonl (events[i].body.sw_if_index);
	      event->server_index = htonl (events[i].body.server_index);
	      event->msg_type = events[i].body.msg_type;
	      event->T1 = htonl (events[i].body.T1);
	      event->T2 = htonl (events[i].body.T2);
	      event->inner_status_code =
		htons (events[i].body.inner_status_code);
	      event->status_code = htons (events[i].body.status_code);
	      event->preference = events[i].body.preference;

	      event->n_prefixes = htonl (vec_len (events[i].prefixes));
	      vl_api_dhcp6_pd_prefix_info_t *prefix =
		(typeof (prefix)) event->prefixes;
	      u32 j;
	      for (j = 0; j < vec_len (events[i].prefixes); j++)
		{
		  dhcp6_prefix_info_t *info = &events[i].prefixes[j];
		  memcpy (prefix->prefix, &info->prefix, 16);
		  prefix->prefix_length = info->prefix_length;
		  prefix->valid_time = htonl (info->valid_time);
		  prefix->preferred_time = htonl (info->preferred_time);
		  prefix++;
		}
	      vec_free (events[i].prefixes);

	      dhcp6_pd_client_public_main_t *dpcpm =
		&dhcp6_pd_client_public_main;
	      call_dhcp6_pd_reply_event_callbacks (event, dpcpm->functions);

	      vpe_client_registration_t *reg;
              /* *INDENT-OFF* */
              pool_foreach(reg, vpe_api_main.dhcp6_pd_reply_events_registrations,
              ({
                vl_api_registration_t *vl_reg;
                vl_reg =
                  vl_api_client_index_to_registration (reg->client_index);
                if (vl_reg && vl_api_can_send_msg (vl_reg))
                  {
                    vl_api_dhcp6_pd_reply_event_t *msg =
                      vl_msg_api_alloc (event_size);
                    clib_memcpy (msg, event, event_size);
                    msg->_vl_msg_id = htons (VL_API_DHCP6_PD_REPLY_EVENT);
                    msg->client_index = reg->client_index;
                    msg->pid = reg->client_pid;
                    vl_api_send_msg (vl_reg, (u8 *) msg);
                  }
              }));
              /* *INDENT-ON* */

	      clib_mem_free (event);
	    }
	}
      vlib_process_put_event_data (vm, event_data);
    }

  /* never reached */
  return 0;
}
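The cast vl_api_dhcp6_pd_prefix_info_t *prefix = (typeof (prefix)) event->prefixes above spells the pointer type only once, in the declaration, so the cast can never drift out of sync with it. A standalone sketch of that self-referential typeof cast (the struct is illustrative):
#include <stdio.h>
#include <stdlib.h>

struct item {
	int id;
};

int main(void)
{
	struct item *it = (typeof(it))malloc(sizeof(*it)); /* cast tracks the declared type */

	if (it == NULL)
		return 1;
	it->id = 7;
	printf("id=%d\n", it->id);
	free(it);
	return 0;
}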
Exemple #23
0
static
void ppfilter_log (int blocked, const struct sockaddr *from, const struct sockaddr *to,
                   const struct sockaddr *remote, int protocol, const pp_msg_iprange_t *match,
                   const pg_table_id_t tableid, const pp_filter_cookie_t cookie, const mach_timespec_t *now)
{
    pp_log_entry_t *lep;
    typeof(lep->pplg_timestamp) ts;
    ts = ((typeof(ts))now->tv_sec) * 1000000ULL;
    ts += ((typeof(ts))now->tv_nsec) / 1000ULL;

    pp_logentries_lock();

    if ((lep = pp_log_get_entry(1))) {
        lep->pplg_timestamp = ts;
        if (blocked)
            lep->pplg_flags |= PP_LOG_BLOCKED;
        else
            lep->pplg_flags |= PP_LOG_ALLOWED;
        if (match->p2_name_idx >= 0) {
            pg_table_id_copy(tableid, lep->pplg_tableid);
            lep->pplg_name_idx = match->p2_name_idx;
        } else {
            bzero(lep->pplg_tableid, sizeof(lep->pplg_tableid));

            switch(match->p2_name_idx) {
            case PP_FLAG_DYN_ENTRY:
                lep->pplg_flags |= PP_LOG_DYN;
                break;
            case PP_FLAG_FLT_STALL:
                lep->pplg_flags |= PP_LOG_FLTR_STALL;
                break;
            default:
                break;
            }

            lep->pplg_name_idx = 0;
        }

        if (remote == from)
            lep->pplg_flags |= PP_LOG_RMT_FRM;
        else
            lep->pplg_flags |= PP_LOG_RMT_TO;

        if (IPPROTO_TCP == protocol)
            lep->pplg_flags |= PP_LOG_TCP;
        else if (IPPROTO_UDP == protocol)
            lep->pplg_flags |= PP_LOG_UDP;
        else if (IPPROTO_ICMP == protocol || IPPROTO_ICMPV6 == protocol)
            lep->pplg_flags |= PP_LOG_ICMP;

        // log addrs are in network order
        const struct sockaddr_in6 *addr6;
        if (AF_INET == from->sa_family) {
            lep->pplg_fromaddr = ((const struct sockaddr_in*)from)->sin_addr.s_addr;
            lep->pplg_fromport = ((const struct sockaddr_in*)from)->sin_port;
        } else if (AF_INET6 == from->sa_family) {
            addr6 = (const struct sockaddr_in6*)from;
            lep->pplg_fromaddr = PP_IN6_V4_MAPPED_ADDR(&addr6->sin6_addr);
            lep->pplg_fromport = addr6->sin6_port;
            lep->pplg_flags |= PP_LOG_IP6;
        }
        if (AF_INET == to->sa_family) {
            lep->pplg_toaddr = ((const struct sockaddr_in*)to)->sin_addr.s_addr;
            lep->pplg_toport = ((const struct sockaddr_in*)to)->sin_port;
        } else if (AF_INET6 == to->sa_family) {
            addr6 = (const struct sockaddr_in6*)to;
            lep->pplg_toaddr = PP_IN6_V4_MAPPED_ADDR(&addr6->sin6_addr);
            lep->pplg_toport = addr6->sin6_port;
            lep->pplg_flags |= PP_LOG_IP6;
        }

        if (cookie)
            lep->pplg_pid = cookie->pid;
        else
            lep->pplg_pid = -1;
    } else {
#if DOLOGWAKE
        ts = 0ULL;
#endif
        pp_stat_increment(droppedlogs);
    }

    pp_logentries_unlock();

// Don't signal here, so as to avoid a possible thread preempt, or other stall.
// The new entry will be picked up by the log thread when its timeout expires.
#if DOLOGWAKE
    if (ts)
        pp_log_wakeup();
#endif
}
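The ts arithmetic at the top of ppfilter_log() uses typeof(lep->pplg_timestamp) so the microsecond conversion is carried out in exactly the log entry's own integer width. A standalone sketch with an illustrative entry type:
#include <stdio.h>
#include <time.h>

struct log_entry {
	unsigned long long timestamp;   /* microseconds */
};

int main(void)
{
	struct timespec now = { .tv_sec = 1700000000, .tv_nsec = 500000 };
	struct log_entry e;

	typeof(e.timestamp) ts = (typeof(ts))now.tv_sec * 1000000ULL;
	ts += (typeof(ts))now.tv_nsec / 1000ULL;
	e.timestamp = ts;

	printf("%llu\n", e.timestamp);
	return 0;
}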
Exemple #24
0
MPERS_PRINTER_DECL(int, btrfs_ioctl,
		   struct tcb *tcp, const unsigned int code, const long arg)
{
	switch (code) {
	/* Take no arguments; command only. */
	case BTRFS_IOC_TRANS_START:
	case BTRFS_IOC_TRANS_END:
	case BTRFS_IOC_SYNC:
	case BTRFS_IOC_SCRUB_CANCEL:
	case BTRFS_IOC_QUOTA_RESCAN_WAIT:
	/*
	 * The codes for these ioctls are based on each accepting a
	 * vol_args but none of them actually consume an argument.
	 */
	case BTRFS_IOC_DEFRAG:
	case BTRFS_IOC_BALANCE:
		break;

	/* takes a signed int */
	case BTRFS_IOC_BALANCE_CTL:
		tprints(", ");
		printxval(btrfs_balance_ctl_cmds, arg, "BTRFS_BALANCE_CTL_???");
		break;

	/* returns a u64 */
	case BTRFS_IOC_START_SYNC: /* R */
		if (entering(tcp))
			return 0;
	/* fall through */
	/* takes a u64 */
	case BTRFS_IOC_DEFAULT_SUBVOL: /* W */
	case BTRFS_IOC_WAIT_SYNC: /* W */
		tprints(", ");
		printnum_int64(tcp, arg, "%" PRIu64);
		break;

	/* u64 but describes a flags bitfield; we can make that symbolic */
	case BTRFS_IOC_SUBVOL_GETFLAGS: { /* R */
		uint64_t flags;

		if (entering(tcp))
			return 0;

		tprints(", ");

		if (umove_or_printaddr(tcp, arg, &flags))
			break;

		printflags64(btrfs_snap_flags_v2, flags, "BTRFS_SUBVOL_???");
		break;
	}

	case BTRFS_IOC_SUBVOL_SETFLAGS: { /* W */
		uint64_t flags;

		tprints(", ");

		if (umove_or_printaddr(tcp, arg, &flags))
			break;

		printflags64(btrfs_snap_flags_v2, flags, "BTRFS_SUBVOL_???");
		break;
	}

	/* More complex types */
	case BTRFS_IOC_BALANCE_V2: /* RW */
		if (entering(tcp)) {
			tprints(", ");
			btrfs_print_balance(tcp, arg, false);
			return 0;
		}

		if (syserror(tcp))
			break;

		tprints(" => ");
		btrfs_print_balance(tcp, arg, true);
		break;
	case BTRFS_IOC_BALANCE_PROGRESS: /* R */
		if (entering(tcp))
			return 0;

		tprints(", ");
		btrfs_print_balance(tcp, arg, true);
		break;

	case BTRFS_IOC_DEFRAG_RANGE: { /* W */
		struct btrfs_ioctl_defrag_range_args args;

		tprints(", ");

		if (umove_or_printaddr(tcp, arg, &args))
			break;

		tprintf("{start=%" PRIu64 ", len=", (uint64_t)args.start);

		tprintf("%" PRIu64, (uint64_t) args.len);
		if (args.len == UINT64_MAX)
			tprints(" /* UINT64_MAX */");

		tprints(", flags=");
		printflags64(btrfs_defrag_flags, args.flags,
			     "BTRFS_DEFRAG_RANGE_???");
		tprintf(", extent_thresh=%u, compress_type=",
			args.extent_thresh);
		printxval(btrfs_compress_types, args.compress_type,
			  "BTRFS_COMPRESS_???");
		tprints("}");
		break;
	}

	case BTRFS_IOC_DEV_INFO: { /* RW */
		struct btrfs_ioctl_dev_info_args args;
		char uuid[UUID_STRING_SIZE+1];
		int valid;

		if (entering(tcp))
			tprints(", ");
		else if (syserror(tcp))
			break;
		else
			tprints(" => ");
		if (umove_or_printaddr(tcp, arg, &args))
			break;
		tprints("{");

		valid = btrfs_unparse_uuid(args.uuid, uuid);
		if (entering(tcp)) {
			tprintf("devid=%" PRI__u64, args.devid);
			if (valid)
				tprintf(", uuid=%s", uuid);
			tprints("}");
			return 0;
		}
		if (valid)
			tprintf("uuid=%s, ", uuid);
		tprintf("bytes_used=%" PRI__u64
			", total_bytes=%" PRI__u64 ", path=",
			args.bytes_used, args.total_bytes);
		print_quoted_string((const char *)args.path, sizeof(args.path),
				    QUOTE_0_TERMINATED);
		tprints("}");
		break;
	}

	case BTRFS_IOC_DEV_REPLACE: { /* RW */
		struct_btrfs_ioctl_dev_replace_args args;

		if (entering(tcp))
			tprints(", ");
		else if (syserror(tcp))
			break;
		else
			tprints(" => ");

		if (umove_or_printaddr(tcp, arg, &args))
			break;

		if (entering(tcp)) {
			tprints("{cmd=");
			printxval64(btrfs_dev_replace_cmds, args.cmd,
				    "BTRFS_IOCTL_DEV_REPLACE_CMD_???");
			if (args.cmd == BTRFS_IOCTL_DEV_REPLACE_CMD_START) {
				const char *str;
				tprintf(", start={srcdevid=%" PRIu64
				   ", cont_reading_from_srcdev_mode=%" PRIu64
				   ", srcdev_name=",
				   (uint64_t) args.start.srcdevid,
				   (uint64_t) args.start.cont_reading_from_srcdev_mode);

				str = (const char*) args.start.srcdev_name;
				print_quoted_string(str,
						sizeof(args.start.srcdev_name),
						QUOTE_0_TERMINATED);
				tprints(", tgtdev_name=");
				str = (const char*) args.start.tgtdev_name;
				print_quoted_string(str,
						sizeof(args.start.tgtdev_name),
						QUOTE_0_TERMINATED);
				tprints("}");

			}
			tprints("}");
			return 0;
		}

		tprints("{result=");
		printxval64(btrfs_dev_replace_results, args.result,
			    "BTRFS_IOCTL_DEV_REPLACE_RESULT_???");
		if (args.cmd == BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS) {
			char buf[sizeof("HH:MM:SS") + 1];
			time_t time;
			tprints(", ");
			printxval64(btrfs_dev_replace_state,
				   args.status.replace_state,
				   "BTRFS_IOCTL_DEV_REPLACE_STATE_???");
			tprintf(", progress_1000=%" PRIu64 " /* ",
				(uint64_t) args.status.progress_1000);
			if (args.status.progress_1000 <= 1000)
				tprintf("%" PRIu64 ".%.2" PRIu64 "%%",
					(uint64_t) args.status.progress_1000 / 10,
					(uint64_t) args.status.progress_1000 % 10);
			else
				tprints("???");
			tprints(" */ ,");

			time = args.status.time_started;
			strftime(buf, sizeof(buf), "%T",
				 localtime(&time));
			tprintf("time_started=%" PRIu64" /* %s */, ",
				(uint64_t) args.status.time_started, buf);

			time = args.status.time_stopped;
			strftime(buf, sizeof(buf), "%T",
				 localtime(&time));
			tprintf("time_stopped=%" PRIu64" /* %s */, ",
				(uint64_t) args.status.time_stopped, buf);

			tprintf("num_write_errors=%" PRIu64
				", num_uncorrectable_read_errors=%" PRIu64,
				(uint64_t) args.status.num_write_errors,
				(uint64_t) args.status.num_uncorrectable_read_errors);
		}
		tprints("}");
		break;
	}

	case BTRFS_IOC_GET_FEATURES: { /* R */
		struct btrfs_ioctl_feature_flags flags;

		if (entering(tcp))
			return 0;

		tprints(", ");
		if (umove_or_printaddr(tcp, arg, &flags))
			break;

		btrfs_print_features(&flags);
		break;
	}

	case BTRFS_IOC_SET_FEATURES: { /* W */
		struct btrfs_ioctl_feature_flags flarg[2];

		tprints(", ");

		if (umove_or_printaddr(tcp, arg, &flarg))
			break;

		tprints("[");
		btrfs_print_features(&flarg[0]);
		tprints(", ");
		btrfs_print_features(&flarg[1]);
		tprints("]");
		break;
	}

	case BTRFS_IOC_GET_SUPPORTED_FEATURES: { /* R */
		struct btrfs_ioctl_feature_flags flarg[3];

		if (entering(tcp))
			return 0;

		tprints(", ");
		if (umove_or_printaddr(tcp, arg, &flarg))
			break;

		tprints("[ /* supported */ ");
		btrfs_print_features(&flarg[0]);

		tprints(", /* safe to set */ ");
		btrfs_print_features(&flarg[1]);

		tprints(", /* safe to clear */ ");
		btrfs_print_features(&flarg[2]);
		tprints("]");

		break;
	}

	case BTRFS_IOC_FS_INFO: { /* R */
		struct btrfs_ioctl_fs_info_args args;
		char uuid[UUID_STRING_SIZE+1];
		uint32_t nodesize, sectorsize, clone_alignment;
#ifndef HAVE_STRUCT_BTRFS_IOCTL_FS_INFO_ARGS_NODESIZE
		__u32 *reserved32;
#endif

		if (entering(tcp))
			return 0;

		tprints(", ");
		if (umove_or_printaddr(tcp, arg, &args))
			break;

#ifdef HAVE_STRUCT_BTRFS_IOCTL_FS_INFO_ARGS_NODESIZE
		nodesize = args.nodesize,
		sectorsize = args.sectorsize,
		clone_alignment = args.clone_alignment;
#else
		reserved32 = (__u32 *) (void *) args.reserved;
		nodesize = reserved32[0];
		sectorsize = reserved32[1];
		clone_alignment = reserved32[2];
#endif
		btrfs_unparse_uuid(args.fsid, uuid);

		tprints("{");
		tprintf("max_id=%" PRI__u64 ", num_devices=%" PRI__u64
			", fsid=%s, nodesize=%u, sectorsize=%u"
			", clone_alignment=%u",
			args.max_id, args.num_devices, uuid,
			nodesize, sectorsize, clone_alignment);
		tprints("}");
		break;
	}

	case BTRFS_IOC_GET_DEV_STATS: { /* RW */
		struct btrfs_ioctl_get_dev_stats args;
		uint64_t i;

		if (entering(tcp))
			tprints(", ");
		else if (syserror(tcp))
			break;
		else
			tprints(" => ");
		if (umove_or_printaddr(tcp, arg, &args))
			break;

		tprints("{");

		if (entering(tcp))
			tprintf("devid=%" PRI__u64 ", ", args.devid);

		tprintf("nr_items=%" PRI__u64 ", flags=", args.nr_items);
		printflags64(btrfs_dev_stats_flags, args.flags,
			     "BTRFS_DEV_STATS_???");

		if (entering(tcp)) {
			tprints("}");
			return 0;
		}

		/*
		 * The structure has a 1k limit; let's make sure we don't
		 * go off into the middle of nowhere with a bad nr_items
		 * value.
		 */
		tprints(", [");
		for (i = 0; i < args.nr_items; i++) {
			if (i)
				tprints(", ");
			if (i >= ARRAY_SIZE(args.values)) {
				tprints("...");
				break;
			}
			const char *name = xlookup(btrfs_dev_stats_values, i);
			if (name)
				tprintf("/* %s */ ", name);
			tprintf("%" PRI__u64, args.values[i]);
		}
		tprints("]}");
		break;
	}

	case BTRFS_IOC_INO_LOOKUP: { /* RW */
		struct btrfs_ioctl_ino_lookup_args args;

		if (entering(tcp))
			tprints(", ");
		else if (syserror(tcp))
			break;
		else
			tprints(" => ");

		if (umove_or_printaddr(tcp, arg, &args))
			break;

		if (entering(tcp)) {
			/* Use subvolume id of the containing root */
			if (args.treeid == 0)
				set_tcb_priv_ulong(tcp, 1);

			tprints("{treeid=");
			btrfs_print_objectid(args.treeid);
			tprints(", objectid=");
			btrfs_print_objectid(args.objectid);
			tprints("}");
			return 0;
		}

		tprints("{");
		if (get_tcb_priv_ulong(tcp)) {
			tprints("treeid=");
			btrfs_print_objectid(args.treeid);
			tprints(", ");
		}

		tprints("name=");
		print_quoted_string(args.name, sizeof(args.name),
				    QUOTE_0_TERMINATED);
		tprints("}");
		break;
	}

	case BTRFS_IOC_INO_PATHS: { /* RW */
		struct btrfs_ioctl_ino_path_args args;

		if (entering(tcp))
			tprints(", ");
		else if (syserror(tcp))
			break;
		else
			tprints(" => ");

		if (umove_or_printaddr(tcp, arg, &args))
			break;

		tprints("{");

		if (entering(tcp)) {
			tprintf("inum=%" PRI__u64 ", size=%" PRI__u64,
				args.inum, args.size);
			tprintf(", fspath=0x%" PRI__x64 "}", args.fspath);
			return 0;
		}

		tprints("fspath=");
		btrfs_print_ino_path_container(tcp, args.fspath);

		tprints("}");
		break;
	}

	case BTRFS_IOC_LOGICAL_INO: { /* RW */
		struct btrfs_ioctl_logical_ino_args args;

		if (entering(tcp))
			tprints(", ");
		else if (syserror(tcp))
			break;
		else
			tprints(" => ");

		if (umove_or_printaddr(tcp, arg, &args))
			break;

		tprints("{");

		if (entering(tcp)) {
			tprintf("logical=%" PRI__u64 ", size=%" PRI__u64,
				args.logical, args.size);
			tprintf(", inodes=0x%" PRI__x64 "}", args.inodes);
			return 0;
		}

		tprints("inodes=");
		btrfs_print_logical_ino_container(tcp, args.inodes);

		tprints("}");
		break;
	}

	case BTRFS_IOC_QGROUP_ASSIGN: { /* W */
		struct btrfs_ioctl_qgroup_assign_args args;

		tprints(", ");
		if (umove_or_printaddr(tcp, arg, &args))
			break;

		tprintf("{assign=%" PRI__u64 ", src=%" PRI__u64
			", dst=%" PRI__u64 "}",
			args.assign, args.src, args.dst);
		break;
	}

	case BTRFS_IOC_QGROUP_CREATE: { /* W */
		struct btrfs_ioctl_qgroup_create_args args;

		tprints(", ");
		if (umove_or_printaddr(tcp, arg, &args))
			break;

		tprintf("{create=%" PRI__u64 ", qgroupid=%" PRI__u64 "}",
			args.create, args.qgroupid);
		break;
	}

	case BTRFS_IOC_QGROUP_LIMIT: { /* R */
		struct btrfs_ioctl_qgroup_limit_args args;

		if (entering(tcp))
			return 0;

		tprints(", ");
		if (umove_or_printaddr(tcp, arg, &args))
			break;

		tprintf("{qgroupid=%" PRI__u64 ", lim=", args.qgroupid);
		btrfs_print_qgroup_limit(&args.lim);
		tprints("}");
		break;
	}

	case BTRFS_IOC_QUOTA_CTL: { /* W */
		struct btrfs_ioctl_quota_ctl_args args;

		tprints(", ");
		if (umove_or_printaddr(tcp, arg, &args))
			break;

		printxval64(btrfs_qgroup_ctl_cmds, args.cmd,
			    "BTRFS_QUOTA_CTL_???");
		tprints("}");

		break;
	}

	case BTRFS_IOC_QUOTA_RESCAN: { /* W */
		struct btrfs_ioctl_quota_rescan_args args;

		tprints(", ");
		if (umove_or_printaddr(tcp, arg, &args))
			break;

		tprintf("{flags=%" PRIu64 "}", (uint64_t) args.flags);
		break;
	}

	case BTRFS_IOC_QUOTA_RESCAN_STATUS: { /* R */
		struct btrfs_ioctl_quota_rescan_args args;

		if (entering(tcp))
			return 0;

		tprints(", ");
		if (umove_or_printaddr(tcp, arg, &args))
			break;

		tprintf("{flags=%" PRIu64 ", progress=", (uint64_t) args.flags);
		btrfs_print_objectid(args.progress);
		tprints("}");
		break;
	}

	case BTRFS_IOC_SET_RECEIVED_SUBVOL: { /* RW */
		struct_btrfs_ioctl_received_subvol_args args;
		char uuid[UUID_STRING_SIZE+1];

		if (entering(tcp))
			tprints(", ");
		else if (syserror(tcp))
			break;
		else
			tprints(" => ");

		if (umove_or_printaddr(tcp, arg, &args))
			break;

		if (entering(tcp)) {
			btrfs_unparse_uuid((unsigned char *)args.uuid, uuid);
			tprintf("{uuid=%s, stransid=%" PRIu64
				", stime=%" PRIu64 ".%u, flags=%" PRIu64
				"}", uuid, (uint64_t) args.stransid,
				(uint64_t) args.stime.sec, args.stime.nsec,
				(uint64_t) args.flags);
			return 0;
		}
		tprintf("{rtransid=%" PRIu64 ", rtime=%" PRIu64 ".%u}",
			(uint64_t) args.rtransid, (uint64_t) args.rtime.sec,
			args.rtime.nsec);
		break;
	}

	case BTRFS_IOC_SCRUB: /* RW */
	case BTRFS_IOC_SCRUB_PROGRESS: { /* RW */
		struct btrfs_ioctl_scrub_args args;

		if (entering(tcp))
			tprints(", ");
		else if (syserror(tcp))
			break;
		else
			tprints(" => ");

		if (umove_or_printaddr(tcp, arg, &args))
			break;

		if (entering(tcp)) {
			tprintf("{devid=%" PRI__u64, args.devid);
			if (code == BTRFS_IOC_SCRUB) {
				tprintf(", start=%" PRI__u64 ", end=",
					args.start);
				tprintf("%" PRI__u64, args.end);
				if (args.end == UINT64_MAX)
					tprints(" /* UINT64_MAX */");
				tprints(", flags=");
				printflags64(btrfs_scrub_flags, args.flags,
					     "BTRFS_SCRUB_???");
			}
			tprints("}");
			return 0;
		}
		tprintf("{data_extents_scrubbed=%" PRI__u64
			", tree_extents_scrubbed=%" PRI__u64
			", data_bytes_scrubbed=%" PRI__u64
			", tree_bytes_scrubbed=%" PRI__u64
			", read_errors=%" PRI__u64
			", csum_errors=%" PRI__u64
			", verify_errors=%" PRI__u64
			", no_csum=%" PRI__u64
			", csum_discards=%" PRI__u64
			", super_errors=%" PRI__u64
			", malloc_errors=%" PRI__u64
			", uncorrectable_errors=%" PRI__u64
			", corrected_errors=%" PRI__u64
			", last_physical=%" PRI__u64
			", unverified_errors=%" PRI__u64 "}",
			args.progress.data_extents_scrubbed,
			args.progress.tree_extents_scrubbed,
			args.progress.data_bytes_scrubbed,
			args.progress.tree_bytes_scrubbed,
			args.progress.read_errors,
			args.progress.csum_errors,
			args.progress.verify_errors,
			args.progress.no_csum,
			args.progress.csum_discards,
			args.progress.super_errors,
			args.progress.malloc_errors,
			args.progress.uncorrectable_errors,
			args.progress.corrected_errors,
			args.progress.last_physical,
			args.progress.unverified_errors);
		break;
	}

	case BTRFS_IOC_TREE_SEARCH: { /* RW */
		struct btrfs_ioctl_search_args args;
		uint64_t buf_offset;

		if (entering(tcp))
			tprints(", ");
		else if (syserror(tcp))
			break;
		else
			tprints(" => ");

		if (umove_or_printaddr(tcp, arg, &args))
			break;

		buf_offset = offsetof(struct btrfs_ioctl_search_args, buf);
		btrfs_print_tree_search(tcp, &args.key, arg + buf_offset,
					sizeof(args.buf), false);
		if (entering(tcp))
			return 0;
		break;
	}

	case BTRFS_IOC_TREE_SEARCH_V2: { /* RW */
		struct btrfs_ioctl_search_args_v2 args;
		uint64_t buf_offset;

		if (entering(tcp))
			tprints(", ");
		else if (syserror(tcp)) {
			if (tcp->u_error == EOVERFLOW) {
				tprints(" => ");
				tcp->u_error = 0;
				if (!umove_or_printaddr(tcp, arg, &args))
					tprintf("{buf_size=%" PRIu64 "}",
						(uint64_t)args.buf_size);
				tcp->u_error = EOVERFLOW;
			}
			break;
		} else
			tprints(" => ");

		if (umove_or_printaddr(tcp, arg, &args))
			break;

		buf_offset = offsetof(struct btrfs_ioctl_search_args_v2, buf);
		btrfs_print_tree_search(tcp, &args.key, arg + buf_offset,
					args.buf_size, true);
		if (entering(tcp))
			return 0;
		break;
	}

	case BTRFS_IOC_SEND: { /* W */
		struct_btrfs_ioctl_send_args args;

		tprints(", ");
		if (umove_or_printaddr(tcp, arg, &args))
			break;

		tprints("{send_fd=");
		printfd(tcp, args.send_fd);
		tprintf(", clone_sources_count=%" PRIu64 ", clone_sources=",
			(uint64_t) args.clone_sources_count);

		if (abbrev(tcp))
			tprints("...");
		else {
			uint64_t record;
			print_array(tcp, (unsigned long) args.clone_sources,
				    args.clone_sources_count,
				    &record, sizeof(record),
				    umoven_or_printaddr,
				    print_objectid_callback, 0);
		}
		tprints(", parent_root=");
		btrfs_print_objectid(args.parent_root);
		tprints(", flags=");
		printflags64(btrfs_send_flags, args.flags,
			     "BTRFS_SEND_FLAGS_???");
		tprints("}");
		break;
	}

	case BTRFS_IOC_SPACE_INFO: { /* RW */
		struct btrfs_ioctl_space_args args;

		if (entering(tcp))
			tprints(", ");
		else if (syserror(tcp))
			break;
		else
			tprints(" => ");

		if (umove_or_printaddr(tcp, arg, &args))
			break;

		tprints("{");
		if (entering(tcp)) {
			tprintf("space_slots=%" PRI__u64 "}", args.space_slots);
			return 0;
		}

		tprintf("total_spaces=%" PRI__u64, args.total_spaces);

		if (args.space_slots == 0 && args.total_spaces) {
			tprints("}");
			break;
		}

		tprints(", spaces=");

		if (abbrev(tcp))
			tprints("...");
		else {
			struct btrfs_ioctl_space_info info;
			print_array(tcp, arg + offsetof(typeof(args), spaces),
				    args.total_spaces,
				    &info, sizeof(info), umoven_or_printaddr,
				    print_btrfs_ioctl_space_info, 0);
		}
		tprints("}");
		break;
	}

	case BTRFS_IOC_SNAP_CREATE:
	case BTRFS_IOC_RESIZE:
	case BTRFS_IOC_SCAN_DEV:
	case BTRFS_IOC_ADD_DEV:
	case BTRFS_IOC_RM_DEV:
	case BTRFS_IOC_SUBVOL_CREATE:
	case BTRFS_IOC_SNAP_DESTROY:
	case BTRFS_IOC_DEVICES_READY: { /* W */
		struct btrfs_ioctl_vol_args args;

		tprints(", ");
		if (umove_or_printaddr(tcp, arg, &args))
			break;

		tprints("{fd=");
		printfd(tcp, args.fd);
		tprints(", name=");
		print_quoted_string(args.name, sizeof(args.name),
				    QUOTE_0_TERMINATED);
		tprints("}");
		break;
	}

	case BTRFS_IOC_SNAP_CREATE_V2:
	case BTRFS_IOC_SUBVOL_CREATE_V2: { /* code is W, but is actually RW */
		struct btrfs_ioctl_vol_args_v2 args;

		if (entering(tcp))
			tprints(", ");
		else if (syserror(tcp))
			break;
		else
			tprints(" => ");

		if (umove_or_printaddr(tcp, arg, &args))
			break;

		if (entering(tcp)) {
			tprints("{fd=");
			printfd(tcp, args.fd);
			tprints(", flags=");
			printflags64(btrfs_snap_flags_v2, args.flags,
				     "BTRFS_SUBVOL_???");
			if (args.flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
				tprintf(", size=%" PRI__u64 ", qgroup_inherit=",
					args.size);

				btrfs_print_qgroup_inherit(tcp,
					(unsigned long)args.qgroup_inherit);
			}
			tprintf(", name=");
			print_quoted_string(args.name, sizeof(args.name),
					    QUOTE_0_TERMINATED);
			tprints("}");
			return 0;
		}
		tprintf("{transid=%" PRI__u64 "}", args.transid);
		break;
	}

	case BTRFS_IOC_GET_FSLABEL: /* R */
		if (entering(tcp))
			return 0;
		/* fall through */
	case BTRFS_IOC_SET_FSLABEL: { /* W */
		char label[BTRFS_LABEL_SIZE];

		tprints(", ");
		if (umove_or_printaddr(tcp, arg, &label))
			break;
		print_quoted_string(label, sizeof(label), QUOTE_0_TERMINATED);
		break;
	}

	case BTRFS_IOC_CLONE:			/* FICLONE */
	case BTRFS_IOC_CLONE_RANGE:		/* FICLONERANGE */
#ifdef BTRFS_IOC_FILE_EXTENT_SAME
	case BTRFS_IOC_FILE_EXTENT_SAME:	/* FIDEDUPERANGE */
#endif
		/*
		 * FICLONE, FICLONERANGE, and FIDEDUPERANGE started out as
		 * btrfs ioctls and the code was kept for the generic
		 * implementations.  We use the BTRFS_* names here because
		 * they will be available on older systems.
		 */
		return file_ioctl(tcp, code, arg);

	default:
		return RVAL_DECODED;
	}
	return RVAL_DECODED | 1;
}
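printflags64() above renders a bitmask symbolically from an xlat table. A standalone sketch of that kind of flag printer (table, names and values are illustrative, not strace's API):
#include <stdio.h>
#include <stdint.h>

struct xlat { uint64_t val; const char *str; };

static const struct xlat demo_flags[] = {
	{ 0x1, "FLAG_A" }, { 0x2, "FLAG_B" }, { 0x8, "FLAG_D" }, { 0, NULL },
};

static void print_flags(uint64_t flags, const struct xlat *tab)
{
	const char *sep = "";
	uint64_t seen = 0;

	for (; tab->str; tab++) {
		if (flags & tab->val) {
			printf("%s%s", sep, tab->str);
			sep = "|";
			seen |= tab->val;
		}
	}
	if (flags & ~seen)              /* leftover bits with no name */
		printf("%s%#llx", sep, (unsigned long long)(flags & ~seen));
	if (!flags)
		printf("0");
}

int main(void)
{
	print_flags(0x1 | 0x8 | 0x40, demo_flags);
	putchar('\n');
	return 0;
}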
Exemple #25
0
static int help(struct sk_buff *skb, unsigned int protoff,
		struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	unsigned int dataoff;
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct tcphdr _tcph;
	const char *data_limit;
	char *data, *ib_ptr;
	int dir = CTINFO2DIR(ctinfo);
	struct nf_conntrack_expect *exp;
	struct nf_conntrack_tuple *tuple;
	__be32 dcc_ip;
	u_int16_t dcc_port;
	__be16 port;
	int i, ret = NF_ACCEPT;
	char *addr_beg_p, *addr_end_p;
	typeof(nf_nat_irc_hook) nf_nat_irc;

	/* If packet is coming from IRC server */
	if (dir == IP_CT_DIR_REPLY)
		return NF_ACCEPT;

	/* Until there's been traffic both ways, don't look in packets. */
	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return NF_ACCEPT;

	/* Not a full tcp header? */
	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
	if (th == NULL)
		return NF_ACCEPT;

	/* No data? */
	dataoff = protoff + th->doff*4;
	if (dataoff >= skb->len)
		return NF_ACCEPT;

	spin_lock_bh(&irc_buffer_lock);
	ib_ptr = skb_header_pointer(skb, dataoff, skb->len - dataoff,
				    irc_buffer);
	BUG_ON(ib_ptr == NULL);

	data = ib_ptr;
	data_limit = ib_ptr + skb->len - dataoff;

	while (data < data_limit - (19 + MINMATCHLEN)) {
		if (memcmp(data, "\1DCC ", 5)) {
			data++;
			continue;
		}
		data += 5;
		/* we have at least (19+MINMATCHLEN)-5 bytes valid data left */

		iph = ip_hdr(skb);
		pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
			 &iph->saddr, ntohs(th->source),
			 &iph->daddr, ntohs(th->dest));

		for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
			if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) {
				/* no match */
				continue;
			}
			data += strlen(dccprotos[i]);
			pr_debug("DCC %s detected\n", dccprotos[i]);

			if (parse_dcc(data, data_limit, &dcc_ip,
				       &dcc_port, &addr_beg_p, &addr_end_p)) {
				pr_debug("unable to parse dcc command\n");
				continue;
			}

			pr_debug("DCC bound ip/port: %pI4:%u\n",
				 &dcc_ip, dcc_port);

			/* dcc_ip can be the internal OR external (NAT'ed) IP */
			tuple = &ct->tuplehash[dir].tuple;
			if (tuple->src.u3.ip != dcc_ip &&
			    tuple->dst.u3.ip != dcc_ip) {
				if (net_ratelimit())
					printk(KERN_WARNING
						"Forged DCC command from %pI4: %pI4:%u\n",
						&tuple->src.u3.ip,
						&dcc_ip, dcc_port);
				continue;
			}

			exp = nf_ct_expect_alloc(ct);
			if (exp == NULL) {
				ret = NF_DROP;
				goto out;
			}
			tuple = &ct->tuplehash[!dir].tuple;
			port = htons(dcc_port);
			nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
					  tuple->src.l3num,
					  NULL, &tuple->dst.u3,
					  IPPROTO_TCP, NULL, &port);

			nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
			if (nf_nat_irc && ct->status & IPS_NAT_MASK)
				ret = nf_nat_irc(skb, ctinfo,
						 addr_beg_p - ib_ptr,
						 addr_end_p - addr_beg_p,
						 exp);
			else if (nf_ct_expect_related(exp) != 0)
				ret = NF_DROP;
			nf_ct_expect_put(exp);
			goto out;
		}
	}
 out:
	spin_unlock_bh(&irc_buffer_lock);
	return ret;
}
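The helper above finds DCC requests with a byte-at-a-time scan for the 5-byte "\1DCC " marker, leaving data just past it on a match. A standalone sketch of that scan over an illustrative buffer:
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char pkt[] = "PRIVMSG x :\1DCC SEND file 3232235777 5000\1";
	const char *data = pkt, *limit = pkt + sizeof(pkt) - 1;

	while (data < limit - 5) {
		if (memcmp(data, "\1DCC ", 5)) {
			data++;                 /* no match, slide forward */
			continue;
		}
		data += 5;                      /* points at the DCC payload */
		printf("DCC payload: %.20s...\n", data);
		break;
	}
	return 0;
}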
Exemple #26
0
int pfq_setsockopt(struct socket *sock,
                int level, int optname,
                char __user * optval,
#if(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31))
                unsigned
#endif
                int optlen)
{
        struct pfq_sock *so = pfq_sk(sock->sk);
        struct pfq_rx_opt * ro;
        struct pfq_tx_opt * to;

        bool found = true;

        if (so == NULL)
                return -EINVAL;

        ro = &so->rx_opt;
        to = &so->tx_opt;

        switch(optname)
        {
        case Q_SO_TOGGLE_QUEUE:
        {
                int active;
                if (optlen != sizeof(active))
                        return -EINVAL;
                if (copy_from_user(&active, optval, optlen))
                        return -EFAULT;

                if (active)
                {
                        if (!so->mem_addr)
                        {
                                struct pfq_queue_hdr * queue;

                                /* alloc queue memory */

                                if (pfq_shared_queue_alloc(so, pfq_queue_total_mem(so)) < 0)
                                {
                                        return -ENOMEM;
                                }

                                /* so->mem_addr and so->mem_size are correctly configured */

                                /* initialize queues headers */

                                queue = (struct pfq_queue_hdr *)so->mem_addr;

                                /* initialize rx queue header */

                                queue->rx.data              = (1L << 24);
                                queue->rx.poll_wait         = 0;
                                queue->rx.size              = so->rx_opt.size;
                                queue->rx.slot_size         = so->rx_opt.slot_size;

                                queue->tx.producer.index    = 0;
                                queue->tx.producer.cache    = 0;
                                queue->tx.consumer.index    = 0;
                                queue->tx.consumer.cache    = 0;

                                queue->tx.size_mask         = so->tx_opt.size - 1;
                                queue->tx.max_len           = so->tx_opt.maxlen;
                                queue->tx.size              = so->tx_opt.size;
                                queue->tx.slot_size         = so->tx_opt.slot_size;

                                /* update the queues base_addr */

                                so->rx_opt.base_addr = so->mem_addr + sizeof(struct pfq_queue_hdr);
                                so->tx_opt.base_addr = so->mem_addr + sizeof(struct pfq_queue_hdr) + pfq_queue_mpdb_mem(so);

                                /* commit both the queues */

                                smp_wmb();

                                so->rx_opt.queue_ptr = &queue->rx;
                                so->tx_opt.queue_ptr = &queue->tx;

                                pr_devel("[PFQ|%d] queue: rx_size:%d rx_slot_size:%d tx_size:%d tx_slot_size:%d\n", so->id, queue->rx.size,
                                                queue->rx.slot_size,
                                                queue->tx.size,
                                                queue->tx.slot_size);
                        }
                }
                else
                {
                        if (so->tx_opt.thread)
                        {
                                pr_devel("[PFQ|%d] stopping TX thread...\n", so->id);
                                kthread_stop(so->tx_opt.thread);
                                so->tx_opt.thread = NULL;
                        }

                        msleep(Q_GRACE_PERIOD);

                        pfq_shared_queue_free(so);
                }

        } break;

        case Q_SO_GROUP_BIND:
        {
                struct pfq_binding bind;
                if (optlen != sizeof(struct pfq_binding))
                        return -EINVAL;

                if (copy_from_user(&bind, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, bind.gid, "add binding");

                pfq_devmap_update(map_set, bind.if_index, bind.hw_queue, bind.gid);
        } break;

        case Q_SO_GROUP_UNBIND:
        {
                struct pfq_binding bind;
                if (optlen != sizeof(struct pfq_binding))
                        return -EINVAL;

                if (copy_from_user(&bind, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, bind.gid, "remove binding");

                pfq_devmap_update(map_reset, bind.if_index, bind.hw_queue, bind.gid);
        } break;

        case Q_SO_EGRESS_BIND:
        {
                struct pfq_binding info;

                if (optlen != sizeof(info))
                        return -EINVAL;
                if (copy_from_user(&info, optval, optlen))
                        return -EFAULT;

                rcu_read_lock();
                if (!dev_get_by_index_rcu(sock_net(&so->sk), info.if_index))
                {
                        rcu_read_unlock();
                        pr_devel("[PFQ|%d] TX bind: invalid if_index:%d\n", so->id, info.if_index);
                        return -EPERM;
                }
                rcu_read_unlock();

                if (info.hw_queue < -1)
                {
                        pr_devel("[PFQ|%d] TX bind: invalid queue:%d\n", so->id, info.hw_queue);
                        return -EPERM;
                }

                so->egress_index = info.if_index;
                so->egress_queue = info.hw_queue;

                pr_devel("[PFQ|%d] egress bind: if_index:%d hw_queue:%d\n", so->id, so->egress_index, so->egress_queue);

        } break;

        case Q_SO_EGRESS_UNBIND:
        {
                so->egress_index = 0;
                so->egress_queue = 0;
                pr_devel("[PFQ|%d] egress unbind.\n", so->id);

        } break;

        case Q_SO_SET_RX_TSTAMP:
        {
                int tstamp;
                if (optlen != sizeof(so->rx_opt.tstamp))
                        return -EINVAL;

                if (copy_from_user(&tstamp, optval, optlen))
                        return -EFAULT;

                tstamp = tstamp ? 1 : 0;

                /* update the timestamp_enabled counter */

                atomic_add(tstamp - so->rx_opt.tstamp, &timestamp_enabled);
                so->rx_opt.tstamp = tstamp;

                pr_devel("[PFQ|%d] timestamp_enabled counter: %d\n", so->id, atomic_read(&timestamp_enabled));
        } break;

        case Q_SO_SET_RX_CAPLEN:
        {
                typeof(so->rx_opt.caplen) caplen;

                if (optlen != sizeof(caplen))
                        return -EINVAL;
                if (copy_from_user(&caplen, optval, optlen))
                        return -EFAULT;

                if (caplen > (size_t)cap_len) {
                        pr_devel("[PFQ|%d] invalid caplen:%zu (max: %d)\n", so->id, caplen, cap_len);
                        return -EPERM;
                }

                so->rx_opt.caplen = caplen;

                so->rx_opt.slot_size = MPDB_QUEUE_SLOT_SIZE(so->rx_opt.caplen);

                pr_devel("[PFQ|%d] caplen:%zu -> slot_size:%zu\n",
                                so->id, so->rx_opt.caplen, so->rx_opt.slot_size);
        } break;

        case Q_SO_SET_RX_SLOTS:
        {
                typeof(so->rx_opt.size) slots;

                if (optlen != sizeof(slots))
                        return -EINVAL;
                if (copy_from_user(&slots, optval, optlen))
                        return -EFAULT;

                if (slots > (size_t)rx_queue_slots) {
                        pr_devel("[PFQ|%d] invalid rx slots:%zu (max: %d)\n", so->id, slots, rx_queue_slots);
                        return -EPERM;
                }

                so->rx_opt.size = slots;

                pr_devel("[PFQ|%d] rx_queue_slots:%zu\n", so->id, so->rx_opt.size);
        } break;

        case Q_SO_SET_TX_MAXLEN:
        {
                typeof (so->tx_opt.maxlen) maxlen;
                if (optlen != sizeof(maxlen))
                        return -EINVAL;
                if (copy_from_user(&maxlen, optval, optlen))
                        return -EFAULT;

                if (maxlen > (size_t)max_len) {
                        pr_devel("[PFQ|%d] invalid maxlen:%zu (max: %d)\n", so->id, maxlen, max_len);
                        return -EPERM;
                }

                so->tx_opt.maxlen = maxlen;

                so->tx_opt.slot_size = SPSC_QUEUE_SLOT_SIZE(so->tx_opt.maxlen); /* max_len: max length */

                pr_devel("[PFQ|%d] tx_slot_size:%zu\n", so->id, so->rx_opt.slot_size);
        } break;

        case Q_SO_SET_TX_SLOTS:
        {
                typeof (so->tx_opt.size) slots;

                if (optlen != sizeof(slots))
                        return -EINVAL;
                if (copy_from_user(&slots, optval, optlen))
                        return -EFAULT;

                if (slots & (slots-1))
                {
                        pr_devel("[PFQ|%d] tx slots must be a power of two.\n", so->id);
                        return -EINVAL;
                }

                if (slots > (size_t)tx_queue_slots) {
                        pr_devel("[PFQ|%d] invalid tx slots:%zu (max: %d)\n", so->id, slots, tx_queue_slots);
                        return -EPERM;
                }

                so->tx_opt.size = slots;

                pr_devel("[PFQ|%d] tx_queue_slots:%zu\n", so->id, so->tx_opt.size);
        } break;

        case Q_SO_GROUP_LEAVE:
        {
                int gid;
                if (optlen != sizeof(gid))
                        return -EINVAL;
                if (copy_from_user(&gid, optval, optlen))
                        return -EFAULT;

                if (pfq_leave_group(gid, so->id) < 0) {
                        return -EFAULT;
                }

                pr_devel("[PFQ|%d] leave: gid:%d\n", so->id, gid);
        } break;

        case Q_SO_GROUP_FPROG:
        {
                struct pfq_fprog fprog;
                if (optlen != sizeof(fprog))
                        return -EINVAL;

                if (copy_from_user(&fprog, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, fprog.gid, "group fprog");

                if (fprog.fcode.len > 0)  /* set the filter */
                {
                        struct sk_filter *filter = pfq_alloc_sk_filter(&fprog.fcode);
                        if (filter == NULL)
                        {
                                pr_devel("[PFQ|%d] fprog error: alloc_sk_filter for gid:%d\n", so->id, fprog.gid);
                                return -EINVAL;
                        }

                        __pfq_set_group_filter(fprog.gid, filter);

                        pr_devel("[PFQ|%d] fprog: gid:%d (fprog len %d bytes)\n", so->id, fprog.gid, fprog.fcode.len);
                }
                else 	/* reset the filter */
                {
                        __pfq_set_group_filter(fprog.gid, NULL);

                        pr_devel("[PFQ|%d] fprog: gid:%d (resetting filter)\n", so->id, fprog.gid);
                }

        } break;

        case Q_SO_GROUP_VLAN_FILT_TOGGLE:
        {
                struct pfq_vlan_toggle vlan;

                if (optlen != sizeof(vlan))
                        return -EINVAL;
                if (copy_from_user(&vlan, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, vlan.gid, "group vlan filt toggle");

                __pfq_toggle_group_vlan_filters(vlan.gid, vlan.toggle);

                pr_devel("[PFQ|%d] vlan filters %s for gid:%d\n", so->id, (vlan.toggle ? "enabled" : "disabled"), vlan.gid);
        } break;

        case Q_SO_GROUP_VLAN_FILT:
        {
                struct pfq_vlan_toggle filt;

                if (optlen != sizeof(filt))
                        return -EINVAL;

                if (copy_from_user(&filt, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, filt.gid, "group vlan filt");

                if (filt.vid < -1 || filt.vid > 4094) {
                        pr_devel("[PFQ|%d] vlan_set error: gid:%d invalid vid:%d!\n", so->id, filt.gid, filt.vid);
                        return -EINVAL;
                }

                if (!__pfq_vlan_filters_enabled(filt.gid)) {
                        pr_devel("[PFQ|%d] vlan_set error: vlan filters disabled for gid:%d!\n", so->id, filt.gid);
                        return -EPERM;
                }

                if (filt.vid  == -1) /* any */
                {
                        int i;
                        for(i = 1; i < 4095; i++)
                                __pfq_set_group_vlan_filter(filt.gid, filt.toggle, i);
                }
                else
                {
                        __pfq_set_group_vlan_filter(filt.gid, filt.toggle, filt.vid);
                }

                pr_devel("[PFQ|%d] vlan_set filter vid %d for gid:%d\n", so->id, filt.vid, filt.gid);
        } break;

        case Q_SO_TX_THREAD_BIND:
        {
                struct pfq_binding info;

                if (optlen != sizeof(info))
                        return -EINVAL;
                if (copy_from_user(&info, optval, optlen))
                        return -EFAULT;

                rcu_read_lock();
                if (!dev_get_by_index_rcu(sock_net(&so->sk), info.if_index))
                {
                        rcu_read_unlock();
                        pr_devel("[PFQ|%d] TX bind: invalid if_index:%d\n", so->id, info.if_index);
                        return -EPERM;
                }
                rcu_read_unlock();

                if (info.hw_queue < -1)
                {
                        pr_devel("[PFQ|%d] TX bind: invalid queue:%d\n", so->id, info.hw_queue);
                        return -EPERM;
                }

                to->if_index = info.if_index;
                to->hw_queue = info.hw_queue;

                pr_devel("[PFQ|%d] TX bind: if_index:%d hw_queue:%d\n", so->id, to->if_index, to->hw_queue);

        } break;

        case Q_SO_TX_THREAD_START:
        {
                int cpu;

                if (to->thread)
                {
                        pr_devel("[PFQ|%d] TX thread already created on cpu %d!\n", so->id, to->cpu);
                        return -EPERM;
                }
                if (to->if_index == -1)
                {
                        pr_devel("[PFQ|%d] socket TX not bound to any device!\n", so->id);
                        return -EPERM;
                }
                if (to->queue_ptr == NULL)
                {
                        pr_devel("[PFQ|%d] socket not enabled!\n", so->id);
                        return -EPERM;
                }

                if (optlen != sizeof(cpu))
                        return -EINVAL;

                if (copy_from_user(&cpu, optval, optlen))
                        return -EFAULT;

                if (cpu < -1 || (cpu > -1  && !cpu_online(cpu)))
                {
                        pr_devel("[PFQ|%d] invalid cpu (%d)!\n", so->id, cpu);
                        return -EPERM;
                }

                to->cpu = cpu;

                pr_devel("[PFQ|%d] creating TX thread on cpu %d -> if_index:%d hw_queue:%d\n", so->id, to->cpu, to->if_index, to->hw_queue);

                to->thread = kthread_create_on_node(pfq_tx_thread,
                                so,
                                to->cpu == -1 ? -1 : cpu_to_node(to->cpu),
                                "pfq_tx_%d", so->id);

                if (IS_ERR(to->thread)) {
                        printk(KERN_INFO "[PFQ] kernel_thread() create failed on cpu %d!\n", to->cpu);
                        return PTR_ERR(to->thread);
                }

                if (to->cpu != -1)
                        kthread_bind(to->thread, to->cpu);

        } break;

        case Q_SO_TX_THREAD_STOP:
        {
                pr_devel("[PFQ|%d] stopping TX thread...\n", so->id);

                if (!to->thread)
                {
                        pr_devel("[PFQ|%d] TX thread not running!\n", so->id);
                        return -EPERM;
                }

                kthread_stop(to->thread);
                to->thread = NULL;

                pr_devel("[PFQ|%d] stop TX thread: done.\n", so->id);

        } break;

        case Q_SO_TX_THREAD_WAKEUP:
        {
                if (to->if_index == -1)
                {
                        pr_devel("[PFQ|%d] socket TX not bound to any device!\n", so->id);
                        return -EPERM;
                }
                if (!to->thread)
                {
                        pr_devel("[PFQ|%d] TX thread not running!\n", so->id);
                        return -EPERM;
                }

                wake_up_process(to->thread);
        } break;

        case Q_SO_TX_QUEUE_FLUSH:
        {
                struct net_device *dev;

                if (to->if_index == -1)
                {
                        pr_devel("[PFQ|%d] socket TX not bound to any device!\n", so->id);
                        return -EPERM;
                }

                if (to->thread && to->thread->state == TASK_RUNNING)
                {
                        pr_devel("[PFQ|%d] TX thread is running!\n", so->id);
                        return -EPERM;
                }

                if (to->queue_ptr == NULL)
                {
                        pr_devel("[PFQ|%d] socket not enabled!\n", so->id);
                        return -EPERM;
                }

                dev = dev_get_by_index(sock_net(&so->sk), to->if_index);
                if (!dev)
                {
                        pr_devel("[PFQ|%d] No such device (if_index = %d)\n", so->id, to->if_index);
                        return -EPERM;
                }

                pfq_tx_queue_flush(to, dev, get_cpu(), NUMA_NO_NODE);
                put_cpu();

                dev_put(dev);
        } break;

        case Q_SO_GROUP_FUNCTION:
        {
                struct pfq_group_computation tmp;
                struct pfq_computation_descr *descr;
                size_t psize, ucsize;

                struct pfq_computation_tree *comp;
                void *context;

                if (optlen != sizeof(tmp))
                        return -EINVAL;
                if (copy_from_user(&tmp, optval, optlen))
                        return -EFAULT;

                CHECK_GROUP_ACCES(so->id, tmp.gid, "group computation");

                if (copy_from_user(&psize, tmp.prog, sizeof(size_t)))
                        return -EFAULT;

                pr_devel("[PFQ|%d] computation size: %zu\n", so->id, psize);

                ucsize = sizeof(size_t) * 2 + psize * sizeof(struct pfq_functional_descr);

                descr = kmalloc(ucsize, GFP_KERNEL);
                if (descr == NULL) {
                        pr_devel("[PFQ|%d] computation: out of memory!\n", so->id);
                        return -ENOMEM;
                }

                if (copy_from_user(descr, tmp.prog, ucsize)) {
                        pr_devel("[PFQ|%d] computation: copy_from_user error!\n", so->id);
                        kfree(descr);
                        return -EFAULT;
                }

                /* print user computation */

                pr_devel_computation_descr(descr);

                /* ensure the correctness of the specified functional computation */

                if (pfq_validate_computation_descr(descr) < 0) {
                        pr_devel("[PFQ|%d] invalid expression!\n", so->id);
                        kfree(descr);
                        return -EFAULT;
                }

                /* allocate context */

                context = pfq_context_alloc(descr);
                if (context == NULL) {
                        pr_devel("[PFQ|%d] context: alloc error!\n", so->id);
                        kfree(descr);
                        return -EFAULT;
                }

                /* allocate struct pfq_computation_tree */

                comp = pfq_computation_alloc(descr);
                if (comp == NULL) {
                        pr_devel("[PFQ|%d] computation: alloc error!\n", so->id);
                        kfree(context);
                        kfree(descr);
                        return -EFAULT;
                }

                /* link the functional computation */

                if (pfq_computation_rtlink(descr, comp, context) < 0) {
                        pr_devel("[PFQ|%d] computation aborted!\n", so->id);
                        kfree(context);
                        kfree(descr);
                        kfree(comp);
                        return -EPERM;
                }

                /* print executable tree data structure */

                pr_devel_computation_tree(comp);

                /* exec init functions */

                if (pfq_computation_init(comp) < 0) {
                        pr_devel("[PFQ|%d] computation initialization aborted!\n", so->id);
                        kfree(context);
                        kfree(descr);
                        kfree(comp);
                        return -EPERM;
                }

                /* set the new program */

                if (pfq_set_group_prog(tmp.gid, comp, context) < 0) {
                        pr_devel("[PFQ|%d] set group program error!\n", so->id);
                        kfree(context);
                        kfree(descr);
                        kfree(comp);
                        return -EPERM;
                }

                kfree(descr);
                return 0;

        } break;

        default:
        {
                found = false;
        } break;

        }

        return found ? 0 : sock_setsockopt(sock, level, optname, optval, optlen);
}
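
Every option case above follows the same defensive pattern: reject a mismatched optlen with -EINVAL, copy the payload out of user space with copy_from_user() before reading any field, and only then validate and commit it. A minimal sketch of that pattern in isolation; the option struct and handler name are hypothetical, not part of PFQ:

#include <linux/uaccess.h>

/* hypothetical option payload, standing in for pfq_binding & co. */
struct example_opt {
	int gid;
	int value;
};

static int example_set_opt(char __user *optval, unsigned int optlen)
{
	struct example_opt opt;

	if (optlen != sizeof(opt))		/* size must match exactly */
		return -EINVAL;
	if (copy_from_user(&opt, optval, optlen))
		return -EFAULT;			/* user pointer faulted */

	if (opt.gid < 0)			/* validate before committing */
		return -EINVAL;

	/* ... commit opt to the socket state here ... */
	return 0;
}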
Example #27
/**
 * sn_cpu_init - initialize per-cpu data areas
 * @cpuid: cpuid of the caller
 *
 * Called during cpu initialization on each cpu as it starts.
 * Currently, initializes the per-cpu data area for SNIA.
 * Also sets up a few fields in the nodepda.  Also known as
 * platform_cpu_init() by the ia64 machvec code.
 */
void __init sn_cpu_init(void)
{
	int cpuid;
	int cpuphyid;
	int nasid;
	int subnode;
	int slice;
	int cnode;
	int i;
	static int wars_have_been_checked;

	memset(pda, 0, sizeof(pda));
	if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, &sn_hub_info->nasid_bitmask, &sn_hub_info->nasid_shift,
				&sn_system_size, &sn_sharing_domain_size, &sn_partition_id,
				&sn_coherency_id, &sn_region_size))
		BUG();
	sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;

	/*
	 * The boot cpu makes this call again after platform initialization is
	 * complete.
	 */
	if (nodepdaindr[0] == NULL)
		return;

	cpuid = smp_processor_id();
	cpuphyid = get_sapicid();

	if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
		BUG();

	for (i=0; i < MAX_NUMNODES; i++) {
		if (nodepdaindr[i]) {
			nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
			nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
			nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
		}
	}

	cnode = nasid_to_cnodeid(nasid);

	pda->p_nodepda = nodepdaindr[cnode];
	pda->led_address =
	    (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
	pda->led_state = LED_ALWAYS_SET;
	pda->hb_count = HZ / 2;
	pda->hb_state = 0;
	pda->idle_flag = 0;

	if (cpuid != 0) {
		memcpy(pda->cnodeid_to_nasid_table,
		       pdacpu(0)->cnodeid_to_nasid_table,
		       sizeof(pda->cnodeid_to_nasid_table));
	}

	/*
	 * Check for WARs.
	 * Only needs to be done once, on BSP.
	 * Has to be done after loop above, because it uses pda.cnodeid_to_nasid_table[i].
	 * Has to be done before assignment below.
	 */
	if (!wars_have_been_checked) {
		sn_check_for_wars();
		wars_have_been_checked = 1;
	}
	sn_hub_info->shub_1_1_found = shub_1_1_found;

	/*
	 * Set up addresses of PIO/MEM write status registers.
	 */
	{
		u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
		u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_1, 
			SH2_PIO_WRITE_STATUS_2, SH2_PIO_WRITE_STATUS_3};
		u64 *pio;
		pio = is_shub1() ? pio1 : pio2;
		pda->pio_write_status_addr = (volatile unsigned long *) LOCAL_MMR_ADDR(pio[slice]);
		pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
	}

	/*
	 * WAR addresses for SHUB 1.x.
	 */
	if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
		int buddy_nasid;
		buddy_nasid =
		    cnodeid_to_nasid(numa_node_id() ==
				     num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
		pda->pio_shub_war_cam_addr =
		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
							      SH1_PI_CAM_CONTROL);
	}
}
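
The (typeof(pda->led_address)) cast in sn_cpu_init() is a common typeof idiom: convert a computed address to whatever pointer type the target field was declared with, so the declaration stays the single source of truth. A standalone GNU C sketch of the idiom; the register base and struct here are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define REG_BASE 0x1000UL	/* hypothetical MMIO base address */

struct state {
	volatile uint32_t *reg;	/* the declared type lives here, and only here */
};

int main(void)
{
	struct state s;

	/* typeof picks up the field's declared type; changing the
	 * declaration above automatically retargets this cast */
	s.reg = (typeof(s.reg))(REG_BASE + 4 * 2);

	printf("%p\n", (void *)s.reg);
	return 0;
}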
Example #28
void f(int n) {
  typeof(n)();
  decltype(n)();
}
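
The function above is C++ with GNU extensions: typeof(n)() and decltype(n)() both create a value-initialized temporary of n's type. In plain C only typeof exists, where it is used in declarations and casts. A small sketch of typical C usage (compiles with gcc):

#include <stdio.h>

int main(void)
{
	int n = 42;
	typeof(n) copy = n;		/* 'copy' has the same type as 'n' */

	/* typeof also accepts expressions; n + 1 has type int here */
	typeof(n + 1) next = n + 1;

	printf("%d %d\n", copy, next);
	return 0;
}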
Example #29
int main(int argc, char *argv[])
{
	struct srio_dev *sriodev;
	struct dma_ch *send_dmadev[SEND_THREAD_NUM];
	struct dma_ch *receive_dmadev[RECEIVE_THREAD_NUM];
	struct dma_pool *dmapool = NULL;
	int i, err;
	struct srio_port_data *port_data;
	uint32_t attr_read, attr_write;
	struct task_arg_type task_arg_send[SEND_THREAD_NUM];
	struct task_arg_type task_arg_receive[RECEIVE_THREAD_NUM];
	pthread_t send_id[SEND_THREAD_NUM];
	pthread_t receive_id[RECEIVE_THREAD_NUM];
	of_init();
	err = fsl_srio_uio_init(&sriodev);
	if (err < 0)
		error(EXIT_FAILURE, -err, "%s(): srio_uio_init()", __func__);

	port_num = fsl_srio_get_port_num(sriodev);

	memset(&cmd_param, 0, sizeof(cmd_param));
	/* ctx add: default command parameters */
	cmd_param.curr_port_id = 0;
	cmd_param.start_cpu = 1;
	cmd_param.passes = 10;
	cmd_param.test_type = 0;
	cmd_param.test_srio_type = 0;

	if (argc < 3) {
		cmd_format_print();
		return -1;
	}
	err = cmd_translate(argc, argv, &cmd_param);
	if (err == 2)
		return 0;
	if (err < 0) {	/* argc was already validated above */
		cmd_format_print();
		return -1;
	}

	port_data = malloc(sizeof(struct srio_port_data) * port_num);
	if (!port_data) {
		error(0, errno, "%s(): port_data", __func__);
		goto err_cmd_malloc;
	}

	for (i = 0; i < port_num; i++) {
		fsl_srio_connection(sriodev, i);
		fsl_srio_get_port_info(sriodev, i + 1, &port_data[i].port_info,
				       &port_data[i].range_virt);
	}

	err = fsl_srio_port_connected(sriodev);
	if (err <= 0) {
		error(0, -err, "%s(): fsl_srio_port_connected", __func__);
		goto err_srio_connected;
	}
	uint8_t flag = 0;
	if (cmd_param.test_type == 1) {
		for (i = 0; i < port_num; i++) {
			if (srio_link(sriodev, i)) {
				printf("port %d success!\n", i);
				fflush(stdout);
			} else {
				printf("port %d failed!\n", i);
				fflush(stdout);
				flag++;
			}
		}
		if (flag != 0)
			return -1;
		return 0;
	}
	err = dma_pool_init(&dmapool);
	if (err < 0) {
		error(0, -err, "%s(): dma_pool_init()", __func__);
		goto err_srio_connected;
	}
	uint8_t port = cmd_param.curr_port_id;
	attr_read = srio_test_win_attrv[3];
	attr_write = srio_test_win_attrv[cmd_param.test_srio_type];

	for (i = 0; i < port_num; i++) {
		dma_addr_t port_phys_base =
			dmapool->dma_phys_base + SRIO_POOL_PORT_OFFSET * i;
		port_data[i].phys.write_recv_data = port_phys_base;
		port_data[i].phys.read_recv_data =
			port_phys_base + SRIO_POOL_SECT_SIZE;
		port_data[i].phys.write_data_prep =
			port_phys_base + SRIO_POOL_SECT_SIZE * 2;
		port_data[i].phys.res =
			port_phys_base + SRIO_POOL_SECT_SIZE * 3;
		
		port_data[i].virt = (typeof(port_data[i].virt))
			(dmapool->dma_virt_base + i * SRIO_POOL_PORT_OFFSET);
		fsl_srio_set_ibwin(sriodev, i, 1,
				   port_data[i].phys.write_recv_data,
				   SRIO_SYS_ADDR, LAWAR_SIZE_8M);
		fsl_srio_set_ibwin(sriodev, i, 2,
				   port_data[i].phys.write_recv_data+0x800000,
				   SRIO_ctl_ADDR, LAWAR_SIZE_8M);
		
		if (fsl_srio_port_connected(sriodev) & (0x1 << i)) {
			fsl_srio_set_obwin(sriodev, i, 1,
				   port_data[i].port_info.range_start,
				   SRIO_SYS_ADDR, LAWAR_SIZE_8M);
			fsl_srio_set_obwin_attr(sriodev, i, 1,
					attr_read, attr_write);

			fsl_srio_set_obwin(sriodev, i, 2,
				   port_data[i].port_info.range_start+0x800000,
				   SRIO_ctl_ADDR, LAWAR_SIZE_8M);
			fsl_srio_set_obwin_attr(sriodev, i, 2,
					attr_read, attr_write);
		} else {
			printf("SRIO port %d error!\n", i + 1);
			fflush(stdout);
			return -errno;
		}
		memset(port_data[i].virt,0,SRIO_POOL_PORT_OFFSET);
	}
	/* set target IDs for ports 0/1, windows 1/2 */
	{
		static const struct { int port, win, id; } tgt[] = {
			{ 0, 1, 0x11 }, { 1, 1, 0x14 },
			{ 0, 2, 0x11 }, { 1, 2, 0x14 },
		};
		for (i = 0; i < (int)(sizeof(tgt) / sizeof(tgt[0])); i++) {
			err = fsl_srio_set_targetid(sriodev, tgt[i].port,
						    tgt[i].win, tgt[i].id);
			if (err != 0) {
				printf("srio set targetid failed (port %d win %d)!\n",
				       tgt[i].port, tgt[i].win);
				fflush(stdout);
			}
		}
	}
	/* ctx add */
	sleep(5);

	for (i = 0; i < 1; i++) {
		err = fsl_dma_chan_init(&send_dmadev[i], 0, i);	
		if (err < 0) {
			error(0, -err, "%s(): fsl_dma_chan_init()", __func__);
			goto err_srio_connected;
		}
		err = fsl_dma_chan_init(&receive_dmadev[i], 1, i);
		if (err < 0) {
			error(0, -err, "%s(): fsl_dma_chan_init()", __func__);
			goto err_srio_connected;
		}
		fsl_dma_chan_basic_direct_init(send_dmadev[i]);
		fsl_dma_chan_bwc(send_dmadev[i], DMA_BWC_1024);
		task_arg_send[i].dmadev = send_dmadev[i];
		
		fsl_dma_chan_basic_direct_init(receive_dmadev[i]);
		fsl_dma_chan_bwc(receive_dmadev[i], DMA_BWC_1024);
		task_arg_receive[i].dmadev = receive_dmadev[i];
		
		task_arg_receive[i].port_data_thread.phys.write_recv_data=port_data[1-port].phys.write_recv_data+THREAD_WIN_SIZE*i;
		task_arg_receive[i].port_data_thread.phys.read_recv_data=port_data[1-port].phys.read_recv_data+THREAD_WIN_SIZE*i;
		task_arg_receive[i].port_data_thread.phys.write_data_prep=port_data[1-port].phys.write_data_prep+THREAD_WIN_SIZE*i;
		task_arg_receive[i].port_data_thread.phys.res=port_data[1-port].phys.res+THREAD_WIN_SIZE*i;
		task_arg_receive[i].port_data_thread.virt.write_recv_data = &port_data[1-port].virt->write_recv_data_t[i][0];
		task_arg_receive[i].port_data_thread.virt.read_recv_data = &port_data[1-port].virt->read_recv_data_t[i][0];
		task_arg_receive[i].port_data_thread.virt.write_data_prep = &port_data[1-port].virt->write_data_prep_t[i][0];
		task_arg_receive[i].port_data_thread.virt.res = &port_data[1-port].virt->res_t[i][0];
		task_arg_receive[i].port_data_thread.port_info.range_start = port_data[1-port].port_info.range_start+THREAD_WIN_SIZE*i; 
		task_arg_receive[i].port = 1-port;

		task_arg_receive[i].srio_type = cmd_param.test_srio_type;
		task_arg_receive[i].cpu = cmd_param.start_cpu;/*bind cpu*/
		task_arg_receive[i].passes = cmd_param.passes;/*bind cpu*/
		/* ctx add*/
		task_arg_send[i].port_data_thread.phys.write_recv_data=port_data[port].phys.write_recv_data+THREAD_WIN_SIZE*i;
		task_arg_send[i].port_data_thread.phys.read_recv_data=port_data[port].phys.read_recv_data+THREAD_WIN_SIZE*i;
		task_arg_send[i].port_data_thread.phys.write_data_prep=port_data[port].phys.write_data_prep+THREAD_WIN_SIZE*i;
		task_arg_send[i].port_data_thread.phys.res=port_data[port].phys.res+THREAD_WIN_SIZE*i;
		task_arg_send[i].port_data_thread.virt.write_recv_data = &port_data[port].virt->write_recv_data_t[i][0];
		task_arg_send[i].port_data_thread.virt.read_recv_data = &port_data[port].virt->read_recv_data_t[i][0];
		task_arg_send[i].port_data_thread.virt.write_data_prep = &port_data[port].virt->write_data_prep_t[i][0];
		task_arg_send[i].port_data_thread.virt.res = &port_data[port].virt->res_t[i][0];
		task_arg_send[i].port_data_thread.port_info.range_start = port_data[port].port_info.range_start+THREAD_WIN_SIZE*i; 
		/* ctx end */
		task_arg_send[i].port = port;
		task_arg_send[i].srio_type = cmd_param.test_srio_type;
		task_arg_send[i].cpu = cmd_param.start_cpu+1;/*bind cpu*/
		task_arg_send[i].passes = cmd_param.passes;/*bind cpu*/
		if (cmd_param.test_type == 2) {
			task_arg_send[i].test_type = 0;
			err = pthread_create(&send_id[i], NULL, t_srio_send, &task_arg_send[i]);
			if (err) {
				printf("Port %d : Send thread failed!\n", port + 1);
				fflush(stdout);
				return -err;	/* pthread_create returns the error code; errno is not set */
			}
			sleep(1);
			err = pthread_create(&receive_id[i], NULL, t_srio_receive, &task_arg_receive[i]);
			if (err) {
				printf("Port %d : Receive thread failed!\n", 2 - port);
				fflush(stdout);
				return -err;
			}
		} else if (cmd_param.test_type == 3) {
			task_arg_send[i].test_type = 1;
			err = pthread_create(&send_id[i], NULL, t_srio_send, &task_arg_send[i]);
			if (err) {
				printf("Port %d : Send thread failed!\n", port + 1);
				fflush(stdout);
				return -err;
			}
		}
	}
	/* multiple: remaining send/receive thread pairs */
	for (i = 1; i < SEND_THREAD_NUM; i++) {
		err = fsl_dma_chan_init(&send_dmadev[i], 0, i);	
		if (err < 0) {
			error(0, -err, "%s(): fsl_dma_chan_init()", __func__);
			goto err_srio_connected;
		}
		err = fsl_dma_chan_init(&receive_dmadev[i], 1, i);
		if (err < 0) {
			error(0, -err, "%s(): fsl_dma_chan_init()", __func__);
			goto err_srio_connected;
		}
		fsl_dma_chan_basic_direct_init(send_dmadev[i]);
		fsl_dma_chan_bwc(send_dmadev[i], DMA_BWC_1024);
		task_arg_send[i].dmadev = send_dmadev[i];
		
		fsl_dma_chan_basic_direct_init(receive_dmadev[i]);
		fsl_dma_chan_bwc(receive_dmadev[i], DMA_BWC_1024);
		task_arg_receive[i].dmadev = receive_dmadev[i];
		
		task_arg_receive[i].port_data_thread.phys.write_recv_data=port_data[port].phys.write_recv_data+THREAD_WIN_SIZE*i;
		task_arg_receive[i].port_data_thread.phys.read_recv_data=port_data[port].phys.read_recv_data+THREAD_WIN_SIZE*i;
		task_arg_receive[i].port_data_thread.phys.write_data_prep=port_data[port].phys.write_data_prep+THREAD_WIN_SIZE*i;
		task_arg_receive[i].port_data_thread.phys.res=port_data[port].phys.res+THREAD_WIN_SIZE*i;
		task_arg_receive[i].port_data_thread.virt.write_recv_data = &port_data[port].virt->write_recv_data_t[i][0];
		task_arg_receive[i].port_data_thread.virt.read_recv_data = &port_data[port].virt->read_recv_data_t[i][0];
		task_arg_receive[i].port_data_thread.virt.write_data_prep = &port_data[port].virt->write_data_prep_t[i][0];
		task_arg_receive[i].port_data_thread.virt.res = &port_data[port].virt->res_t[i][0];
		task_arg_receive[i].port_data_thread.port_info.range_start = port_data[port].port_info.range_start+THREAD_WIN_SIZE*i; 
		task_arg_receive[i].port = port;
		task_arg_receive[i].srio_type = cmd_param.test_srio_type;
		task_arg_receive[i].cpu = cmd_param.start_cpu+2;
                task_arg_receive[i].passes = cmd_param.passes;

		task_arg_send[i].port_data_thread.phys.write_recv_data=port_data[1-port].phys.write_recv_data+THREAD_WIN_SIZE*i;
		task_arg_send[i].port_data_thread.phys.read_recv_data=port_data[1-port].phys.read_recv_data+THREAD_WIN_SIZE*i;
		task_arg_send[i].port_data_thread.phys.write_data_prep=port_data[1-port].phys.write_data_prep+THREAD_WIN_SIZE*i;
		task_arg_send[i].port_data_thread.phys.res=port_data[1-port].phys.res+THREAD_WIN_SIZE*i;
		task_arg_send[i].port_data_thread.virt.write_recv_data = &port_data[1-port].virt->write_recv_data_t[i][0];
		task_arg_send[i].port_data_thread.virt.read_recv_data = &port_data[1-port].virt->read_recv_data_t[i][0];
		task_arg_send[i].port_data_thread.virt.write_data_prep = &port_data[1-port].virt->write_data_prep_t[i][0];
		task_arg_send[i].port_data_thread.virt.res = &port_data[1-port].virt->res_t[i][0];
		task_arg_send[i].port_data_thread.port_info.range_start = port_data[1-port].port_info.range_start+THREAD_WIN_SIZE*i; 
		
		task_arg_send[i].port = 1-port;
		task_arg_send[i].srio_type = cmd_param.test_srio_type;
		task_arg_send[i].cpu = cmd_param.start_cpu+3;
		task_arg_send[i].passes = cmd_param.passes;
		if (cmd_param.test_type == 2) {
			task_arg_send[i].test_type = 0;
			err = pthread_create(&send_id[i], NULL, t_srio_send, &task_arg_send[i]);
			if (err) {
				printf("Port %d : Send thread failed!\n", 2 - port);
				fflush(stdout);
				return -err;
			}
			sleep(1);
			err = pthread_create(&receive_id[i], NULL, t_srio_receive, &task_arg_receive[i]);
			if (err) {
				printf("Port %d : Receive thread failed!\n", port + 1);
				fflush(stdout);
				return -err;
			}
		} else if (cmd_param.test_type == 3) {
			task_arg_send[i].test_type = 1;
			err = pthread_create(&send_id[i], NULL, t_srio_send, &task_arg_send[i]);
			if (err) {
				printf("Port %d : Send thread failed!\n", 2 - port);
				fflush(stdout);
				return -err;
			}
		}
	}

	for (i = 0; i < SEND_THREAD_NUM; i++) {
		if (cmd_param.test_type == 2) {
			pthread_join(send_id[i], NULL);
			pthread_join(receive_id[i], NULL);
		} else if (cmd_param.test_type == 3) {
			pthread_join(send_id[i], NULL);
		}
	}
	/* ctx end */

	free(port_data);
	for (i = 0; i < SEND_THREAD_NUM; i++) {
		fsl_dma_chan_finish(send_dmadev[i]);
		fsl_dma_chan_finish(receive_dmadev[i]);
	}
	dma_pool_finish(dmapool);
	fsl_srio_uio_finish(sriodev);

	of_finish();
	return EXIT_SUCCESS;

err_srio_connected:
	free(port_data);
err_cmd_malloc:
	fsl_srio_uio_finish(sriodev);
	of_finish();

	return err;
}
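
One convention worth noting in the thread setup above: pthread_create() returns the error number directly and does not set errno, which is why the failure paths return -err rather than -errno. A minimal standalone illustration of the convention (compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static void *worker(void *arg)
{
	(void)arg;	/* unused */
	return NULL;
}

int main(void)
{
	pthread_t tid;
	int err = pthread_create(&tid, NULL, worker, NULL);

	if (err) {
		/* err IS the error code; errno is untouched */
		fprintf(stderr, "pthread_create: %s\n", strerror(err));
		return 1;
	}
	pthread_join(tid, NULL);
	return 0;
}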
int fdtv_start_feed(struct dvb_demux_feed *dvbdmxfeed)
{
	struct firedtv *fdtv = dvbdmxfeed->demux->priv;
	int pidc, c, ret;
	u16 pids[16];

	switch (dvbdmxfeed->type) {
	case DMX_TYPE_TS:
	case DMX_TYPE_SEC:
		break;
	default:
		dev_err(fdtv->device, "can't start dmx feed: invalid type %u\n",
			dvbdmxfeed->type);
		return -EINVAL;
	}

	if (mutex_lock_interruptible(&fdtv->demux_mutex))
		return -EINTR;

	if (dvbdmxfeed->type == DMX_TYPE_TS) {
		switch (dvbdmxfeed->pes_type) {
		case DMX_TS_PES_VIDEO:
		case DMX_TS_PES_AUDIO:
		case DMX_TS_PES_TELETEXT:
		case DMX_TS_PES_PCR:
		case DMX_TS_PES_OTHER:
			c = alloc_channel(fdtv);
			break;
		default:
			dev_err(fdtv->device,
				"can't start dmx feed: invalid pes type %u\n",
				dvbdmxfeed->pes_type);
			ret = -EINVAL;
			goto out;
		}
	} else {
		c = alloc_channel(fdtv);
	}

	if (c > 15) {
		dev_err(fdtv->device, "can't start dmx feed: busy\n");
		ret = -EBUSY;
		goto out;
	}

	dvbdmxfeed->priv = (typeof(dvbdmxfeed->priv))(unsigned long)c;
	fdtv->channel_pid[c] = dvbdmxfeed->pid;
	collect_channels(fdtv, &pidc, pids);

	if (dvbdmxfeed->pid == 8192) {
		ret = avc_tuner_get_ts(fdtv);
		if (ret) {
			dealloc_channel(fdtv, c);
			dev_err(fdtv->device, "can't get TS\n");
			goto out;
		}
	} else {
		ret = avc_tuner_set_pids(fdtv, pidc, pids);
		if (ret) {
			dealloc_channel(fdtv, c);
			dev_err(fdtv->device, "can't set PIDs\n");
			goto out;
		}
	}
out:
	mutex_unlock(&fdtv->demux_mutex);

	return ret;
}
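
The (typeof(dvbdmxfeed->priv))(unsigned long)c cast in fdtv_start_feed() is the usual trick for stashing a small integer in a pointer-sized field without adding a dedicated member. A standalone sketch of the round trip; the struct and field names here are hypothetical:

#include <stdint.h>
#include <stdio.h>

struct feed {
	void *priv;	/* opaque per-feed slot, here reused to hold an int */
};

int main(void)
{
	struct feed f;
	int channel = 7;

	/* store: widen to a pointer-sized integer, then cast to the
	 * field's declared type via typeof */
	f.priv = (typeof(f.priv))(uintptr_t)channel;

	/* load: reverse the two casts */
	int restored = (int)(uintptr_t)f.priv;

	printf("%d\n", restored);
	return 0;
}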