Example No. 1
void
pdf_write_outlines(PDF *p)
{
    int i;

    if (p->outline_count == 0)          /* no outlines: return */
        return;

    pdc_begin_obj(p->out, p->outlines[0].obj_id); /* root outline object */
    pdc_begin_dict(p->out);

    if (p->outlines[0].count != 0)
        pdc_printf(p->out, "/Count %d\n", COUNT(0));
    pdc_objref(p->out, "/First", OBJ_ID(FIRST(0)));
    pdc_objref(p->out, "/Last", OBJ_ID(LAST(0)));

    pdc_end_dict(p->out);
    pdc_end_obj(p->out);                        /* root outline object */

#define PDF_FLUSH_AFTER_MANY_OUTLINES   1000    /* ca. 50-100 KB */
    for (i = 1; i <= p->outline_count; i++) {
        /* reduce memory usage for many outline entries */
        if (i % PDF_FLUSH_AFTER_MANY_OUTLINES == 0)
            pdc_flush_stream(p->out);

        pdf_write_outline_dict(p, i);
    }
}
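The OBJ_ID/COUNT/FIRST/LAST accessors used here (and in the pdf_write_outline_dict example further below) are not included in this excerpt. A plausible sketch of how such macros might be defined over the p->outlines[] array, assuming each pdf_outline entry stores its PDF object id and its tree links as array indices:

/* Hypothetical accessor macros over p->outlines[]; the real PDFlib
 * definitions are not part of this excerpt. Each macro maps an outline
 * index to a field of the corresponding pdf_outline entry, and relies on
 * a "PDF *p" being in scope in the calling function.
 */
#define OBJ_ID(index)   (p->outlines[index].obj_id)  /* PDF object id        */
#define COUNT(index)    (p->outlines[index].count)   /* number of children   */
#define OPEN(index)     (p->outlines[index].open)    /* entry shown expanded */
#define PARENT(index)   (p->outlines[index].parent)  /* tree links, stored   */
#define FIRST(index)    (p->outlines[index].first)   /* as indices into      */
#define LAST(index)     (p->outlines[index].last)    /* p->outlines[]        */
#define PREV(index)     (p->outlines[index].prev)
#define NEXT(index)     (p->outlines[index].next)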
Example No. 2
static int grpobj_monitor_members(xid_t xid, obj_t grp_obj,
				  u32 *ready_cnt,
				  u32 *err_cnt)
{
	int rc = MLU_STATUS_SUCCESS;
	struct obj_iter iter;
	struct grp_desc *grp_desc;
	obj_t member = NULL;
	struct obj_generic_properties props;
	bool queued = false;
	
	*ready_cnt = 0;
	*err_cnt = 0;

	grp_desc = GRP_DESC(grp_obj);
	
	if (RTL_HASHTABLE_NR_ENTRIES(&grp_desc->members) == 0) {
		return MLU_STATUS_SUCCESS;
	}

	grpmgr_member_iter_init(xid, grp_obj, &iter);

	do {
		member = grpmgr_member_iter_next_member(&iter);
		if (member == NULL) {
			rc = MLU_STATUS_SUCCESS;
			break;
		}

		/* Establish a wait dependency on the member, regardless
		 * of the member state. We specify the state as EXPIRED
		 * because we'll never find a member in that state. The
		 * goal is to always place a wait dependency on the
		 * member.
		 */
		rc = obj_wait_for_state(xid, member, grp_desc,
					OBJ_STATE_EXPIRED,
					&queued);
		if (rc != 0) {
			MLU_TRACE(TL_ERR, "xid:0x%x failed to wait EXPIRED "
				  "st, RO:"OID_FMT" DO:"OID_FMT"\n",
				  xid, OBJ_ID(OBJ_OID(member)),
				  OBJ_ID(OBJ_OID(grp_desc)));
			goto monitor_members_exit;
		}

		obj_get_generic_properties(member, &props);

		if (props.public_state == OBJ_STATE_READY) {
			(*ready_cnt)++;
		} else if (props.public_state == OBJ_STATE_ERROR) {
			(*err_cnt)++;
		}
	} while (true);

 monitor_members_exit:
	
	return rc;
}
Example No. 3
smmgr_status_t grpobj_goto_ready(xid_t xid, obj_t obj)
{
	int rc = MLU_STATUS_SUCCESS;
	struct grp_desc *grp_desc;
	smmgr_status_t status = SMMGR_STATUS_COMPLETE;
	u32 ready_cnt = 0;
	u32 err_cnt = 0;

	grp_desc = GRP_DESC(obj);
	if (grp_desc->persistent_data.destroy_when_empty &&
	    (RTL_HASHTABLE_NR_ENTRIES(&grp_desc->members) == 0)) {
		rc = obj_destroy(xid, obj);
		if (rc != MLU_STATUS_SUCCESS) {
			MLU_TRACE(TL_ERR, "xid:0x%x failed to destroy "
				  "grp obj "OID_FMT", rc:0x%x\n",
				  xid, OBJ_ID(OBJ_OID(obj)), rc);
			status = SMMGR_STATUS_INCOMPLETE;
		}
		goto ready_exit;
	}

	rc = grpobj_monitor_members(xid, obj, &ready_cnt, &err_cnt);
	if (rc != MLU_STATUS_SUCCESS) {
		MLU_TRACE(TL_ERR, "xid:0x%x failed to monitor members, "
			  "grp obj "OID_FMT", rc:0x%x\n",
			  xid, OBJ_ID(OBJ_OID(obj)), rc);
		status = SMMGR_STATUS_INCOMPLETE;
		goto ready_exit;
	}

	if (err_cnt != 0) {
		MLU_TRACE(TL_WRN, "xid:0x%x grp obj "OID_FMT" err_cnt:%d\n",
			  xid, OBJ_ID(OBJ_OID(obj)), err_cnt);
		obj_fail(xid, obj, -1);
		status = SMMGR_STATUS_INCOMPLETE;
		goto ready_exit;
	}

	if (ready_cnt != RTL_HASHTABLE_NR_ENTRIES(&grp_desc->members)) {
		/* Loop back to ASYNC_CALLS_COMPLETE and run to READY
		 * again. We don't reach READY until all members are
		 * in READY state.
		 */
		obj_set_private_state(xid, obj,
				      GRP_STATE_ASYNC_CALLS_COMPLETE);
		status = SMMGR_STATUS_INCOMPLETE;
		goto ready_exit;
	}

 ready_exit:

	MLU_TRACE(TL_INF, "xid:0x%x grp obj "OID_FMT" ready, sts:0x%x\n",
		  xid, OBJ_ID(OBJ_OID(obj)), status);

	return status;
}
Example No. 4
int cgmgr_create_cg(xid_t xid, struct mlu_ioctl_create_cg_args *argp)
{
	int rc = 0;
	obj_id_t cg_oid;
	obj_t cg_obj = NULL;
	obj_t vu_obj = NULL;
	unsigned int i;

	/* Create group object first */
	rc = obj_create(xid, OBJ_TYPE_CG, argp, &cg_oid);
	if (rc != 0) {
		MLU_TRACE(TL_ERR, "xid:0x%x failed to create CG, rc:0x%x\n",
			  xid, rc);
		goto create_cg_exit;
	}

	cg_obj = objmgr_access_obj(xid, cg_oid);
	if (cg_obj == NULL) {
		rc = MLU_STATUS_NOENT;
		MLU_TRACE(TL_ERR, "xid:0x%x failed to access CG "
			  OID_FMT"\n", xid, OBJ_ID(cg_oid));
		goto create_cg_exit;
	}

	for (i = 0; i < argp->nr_members; i++) {
		vu_obj = objmgr_access_obj(xid, argp->members[i]);
		if (vu_obj == NULL) {
			rc = MLU_STATUS_NOENT;
			MLU_TRACE(TL_ERR, "xid:0x%x failed to access member "
				  OID_FMT"\n", xid, OBJ_ID(argp->members[i]));
			break;
		}

		rc = grpmgr_add_member(xid, cg_obj, vu_obj);
		if (rc != 0) {
			MLU_TRACE(TL_ERR, "xid:0x%x failed to add member "
				  OID_FMT", rc:0x%x\n",
				  xid, OBJ_ID(argp->members[i]), rc);
			break;
		}
	}

	argp->cg_oid = cg_oid;

 create_cg_exit:

	if (rc != 0) {
		/* TODO: Do the clean up work */
	}

	return rc;
}
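The TODO above leaves error cleanup unimplemented. A minimal sketch of what that path might look like, mirroring the member-removal loop in cgmgr_destroy_cg further below; the helper name is hypothetical and not part of the original source:

/* Hypothetical cleanup for the error path of cgmgr_create_cg(): remove any
 * members that were already added to the partially built group, then
 * destroy the CG object itself.
 */
static void cgmgr_create_cg_cleanup(xid_t xid, obj_t cg_obj)
{
	struct obj_iter iter;
	obj_t member;

	if (cg_obj == NULL)
		return;

	grpmgr_member_iter_init(xid, cg_obj, &iter);
	while ((member = grpmgr_member_iter_next_member(&iter)) != NULL)
		(void)grpmgr_remove_member(xid, cg_obj, member);

	(void)obj_destroy(xid, cg_obj);
}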
Example No. 5
smmgr_status_t grpobj_goto_members_ready(xid_t xid, obj_t obj)
{
	int rc = MLU_STATUS_SUCCESS;
	struct grp_desc *grp_desc;
	smmgr_status_t status = SMMGR_STATUS_COMPLETE;
	u32 ready_cnt = 0;
	u32 err_cnt = 0;

	grp_desc = GRP_DESC(obj);
	if (grp_desc->persistent_data.destroy_when_empty &&
	    (RTL_HASHTABLE_NR_ENTRIES(&grp_desc->members) == 0)) {
		rc = obj_destroy(xid, obj);
		if (rc != MLU_STATUS_SUCCESS) {
			MLU_TRACE(TL_ERR, "xid:0x%x failed to destroy "
				  "grp obj "OID_FMT", rc:0x%x\n",
				  xid, OBJ_ID(OBJ_OID(obj)), rc);
			status = SMMGR_STATUS_INCOMPLETE;
		}
		goto members_ready_exit;
	}

	rc = grpobj_monitor_members(xid, obj, &ready_cnt, &err_cnt);
	if (rc != MLU_STATUS_SUCCESS) {
		MLU_TRACE(TL_ERR, "xid:0x%x failed to monitor members, "
			  "grp obj "OID_FMT", rc:0x%x\n",
			  xid, OBJ_ID(OBJ_OID(obj)), rc);
		status = SMMGR_STATUS_INCOMPLETE;
		goto members_ready_exit;
	}

	if (err_cnt != 0) {
		MLU_TRACE(TL_WRN, "xid:0x%x grp obj "OID_FMT" err_cnt:%d\n",
			  xid, OBJ_ID(OBJ_OID(obj)), err_cnt);
		obj_fail(xid, obj, -1);
		status = SMMGR_STATUS_INCOMPLETE;
		goto members_ready_exit;
	}

	if (ready_cnt != RTL_HASHTABLE_NR_ENTRIES(&grp_desc->members)) {
		status = SMMGR_STATUS_INCOMPLETE;
		goto members_ready_exit;
	}

 members_ready_exit:
	
	MLU_TRACE(TL_INF, "xid:0x%x grp obj "OID_FMT" members ready, sts:0x%x\n",
		  xid, OBJ_ID(OBJ_OID(obj)), status);
	
	return status;
}
Example No. 6
smmgr_status_t grpobj_goto_async_calls_complete(xid_t xid, obj_t obj)
{
	MLU_TRACE(TL_INF, "xid:0x%x grp obj "OID_FMT" async calls complete\n",
		  xid, OBJ_ID(OBJ_OID(obj)));

	return SMMGR_STATUS_COMPLETE;
}
Example No. 7
static int grpmgr_member_compare_fn(void *key, void *entry)
{
	int ret;
	struct grp_membership *membership = (struct grp_membership *)entry;
	obj_id_t oid = OBJ_OID(membership->member_obj);
	obj_id_t key_oid = *(obj_id_t *)key;

	if (OBJ_ID(key_oid) < OBJ_ID(oid)) {
		ret = -1;
	} else if (OBJ_ID(key_oid) > OBJ_ID(oid)) {
		ret = 1;
	} else {
		ret = 0;
	}

	return ret;
}
Example No. 8
struct ib_cq *siw_create_cq(struct ib_device *ofa_dev,
			    const struct ib_cq_init_attr *attr,
//			    int size,
//			    int vec /* unused */,
			    struct ib_ucontext *ib_context,
			    struct ib_udata *udata)
{
	struct siw_cq			*cq = NULL;
	struct siw_dev			*sdev = siw_dev_ofa2siw(ofa_dev);
	struct siw_uresp_create_cq	uresp;
	int rv;

	if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
		dprint(DBG_ON, ": Out of CQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attr->cqe < 1 || attr->cqe > SIW_MAX_CQE) {
		dprint(DBG_ON, ": CQE: %d\n", attr->cqe);
		rv = -EINVAL;
		goto err_out;
	}
	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq) {
		dprint(DBG_ON, ":  kmalloc\n");
		rv = -ENOMEM;
		goto err_out;
	}
//	cq->ofa_cq.cqe = size - 1;
	cq->ofa_cq.cqe = attr->cqe - 1;

	rv = siw_cq_add(sdev, cq);
	if (rv)
		goto err_out_idr;

	INIT_LIST_HEAD(&cq->queue);
	spin_lock_init(&cq->lock);
	atomic_set(&cq->qlen, 0);

	if (ib_context) {
		uresp.cq_id = OBJ_ID(cq);

		rv = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (rv)
			goto err_out_idr;
	}
	return &cq->ofa_cq;

err_out_idr:
	siw_remove_obj(&sdev->idr_lock, &sdev->cq_idr, &cq->hdr);
err_out:
	dprint(DBG_OBJ, ": CQ creation failed\n");

	kfree(cq);
	atomic_dec(&sdev->num_cq);

	return ERR_PTR(rv);
}
Example No. 9
smmgr_status_t grpobj_goto_error(xid_t xid, obj_t obj)
{
	grpobj_unmonitor_members(xid, obj);

	MLU_TRACE(TL_INF, "xid:0x%x grp obj "OID_FMT" error\n",
		  xid, OBJ_ID(OBJ_OID(obj)));
	
	return SMMGR_STATUS_COMPLETE;
}
Example No. 10
int cgmgr_destroy_cg(xid_t xid, struct mlu_ioctl_destroy_cg_args *argp)
{
	int rc = 0;
	obj_t cg_obj = NULL;
	obj_t member = NULL;
	struct obj_iter iter;

	cg_obj = objmgr_access_obj(xid, argp->cg_oid);
	if (cg_obj == NULL) {
		rc = MLU_STATUS_NOENT;
		MLU_TRACE(TL_ERR, "xid:0x%x failed to access CG "
			  OID_FMT"\n", xid, OBJ_ID(argp->cg_oid));
		goto destroy_cg_exit;
	}

	grpmgr_member_iter_init(xid, cg_obj, &iter);

	do {
		member = grpmgr_member_iter_next_member(&iter);
		if (member == NULL) {
			break;
		}

		rc = grpmgr_remove_member(xid, cg_obj, member);
		if (rc != 0) {
			MLU_TRACE(TL_ERR, "xid:0x%x failed to remove "
				  "member "OID_FMT", rc:0x%x\n",
				  xid, OBJ_ID(OBJ_OID(member)), rc);
		}
	} while (true);

	rc = obj_destroy(xid, cg_obj);
	if (rc != 0) {
		MLU_TRACE(TL_ERR, "xid:0x%x failed to destroy CG "
			  OID_FMT", rc:0x%x\n",
			  xid, OBJ_ID(argp->cg_oid), rc);
		obj_fail(xid, cg_obj, rc);
	}

 destroy_cg_exit:
	
	return rc;
}
Example No. 11
smmgr_status_t grpobj_goto_expired(xid_t xid, obj_t obj)
{
	struct grp_desc *grp_desc;

	grp_desc = GRP_DESC(obj);
	ASSERT(grp_desc->members.nr_entries == 0);

	MLU_TRACE(TL_INF, "xid:0x%x grp obj "OID_FMT" expired\n",
		  xid, OBJ_ID(OBJ_OID(obj)));
	
	return SMMGR_STATUS_COMPLETE;
}
Example No. 12
static int evthdlr_process_lu_do_scn(struct evthdlr_request *req)
{
	int rc = 0;
	struct list_head *head = NULL;
	struct list_head *entry = NULL;
	struct lu_scn_info *scn_info = NULL;
	struct lu_do_scn_ctx *do_scn = NULL;
	lu_do_scn_fn scn_fn = NULL;
	void *scn_ctx = NULL;

	do_scn = &req->ctx.lu_do_scn;
	head = &evthdlr_globals.lu_scn_q;
	
	spin_lock(&evthdlr_globals.evthdlr_lock);
	list_for_each(entry, head) {
		scn_info = list_entry(entry, struct lu_scn_info, link);
		if (OBJ_ID(scn_info->lu_oid) == OBJ_ID(do_scn->lu_oid)) {
			scn_fn = scn_info->scn_fn;
			scn_ctx = scn_info->ctx;
			break;
		}
	}
Example No. 13
static PyObject*
jones_ipy_obj_get_name (PyObject *self, PyObject *args)
{
  OBJECT  *o;
  long    p_o;

  if (!PyArg_ParseTuple(args, "l", &p_o))
    return NULL;
  o = (OBJECT*) p_o;

  return Py_BuildValue("s",  OBJ_ID(o));
}
Example No. 14
static void siw_free_mem(struct kref *ref)
{
	struct siw_mem *m;

	m = container_of(container_of(ref, struct siw_objhdr, ref),
			 struct siw_mem, hdr);

	dprint(DBG_MM|DBG_OBJ, "(MEM%d): Free Object\n", OBJ_ID(m));

	atomic_dec(&m->hdr.sdev->num_mem);

	if (SIW_MEM_IS_MW(m)) {
		struct siw_mw *mw = container_of(m, struct siw_mw, mem);
		kfree(mw);
	} else {
Example No. 15
/*
 * siw_poll_cq()
 *
 * Reap CQ entries if available and copy work completion status into
 * array of WC's provided by caller. Returns number of reaped CQE's.
 *
 * @ofa_cq:	OFA CQ contained in siw CQ.
 * @num_cqe:	Maximum number of CQE's to reap.
 * @wc:		Array of work completions to be filled by siw.
 */
int siw_poll_cq(struct ib_cq *ofa_cq, int num_cqe, struct ib_wc *wc)
{
	struct siw_cq		*cq  = siw_cq_ofa2siw(ofa_cq);
	int			i;

	for (i = 0; i < num_cqe; i++) {
		if (!(siw_reap_cqe(cq, wc)))
			break;
		wc++;
	}
	dprint(DBG_CQ, " CQ%d: reap %d completions (%d left)\n",
		OBJ_ID(cq), i, atomic_read(&cq->qlen));

	return i;
}
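For reference, a minimal caller-side sketch of the reaping loop described in the comment above; the helper name, batch size, and log message are illustrative and not taken from the driver:

/* Hypothetical caller: drain the CQ in small batches until it is empty. */
static void example_drain_cq(struct ib_cq *ofa_cq)
{
	struct ib_wc wc[8];
	int n, i;

	do {
		n = siw_poll_cq(ofa_cq, 8, wc);
		for (i = 0; i < n; i++) {
			if (wc[i].status != IB_WC_SUCCESS)
				pr_warn("wr 0x%llx completed with status %d\n",
					(unsigned long long)wc[i].wr_id,
					wc[i].status);
		}
	} while (n > 0);
}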
Example No. 16
/*
 * siw_req_notify_cq()
 *
 * Request notification for new CQE's added to that CQ.
 * Defined flags:
 * o SIW_CQ_NOTIFY_SOLICITED lets siw trigger a notification
 *   event if a WQE with notification flag set enters the CQ
 * o SIW_CQ_NOTIFY_NEXT_COMP lets siw trigger a notification
 *   event if a WQE enters the CQ.
 * o IB_CQ_REPORT_MISSED_EVENTS: return value will provide the
 *   number of not reaped CQE's regardless of its notification
 *   type and current or new CQ notification settings.
 *
 * @ofa_cq:	OFA CQ contained in siw CQ.
 * @flags:	Requested notification flags.
 */
int siw_req_notify_cq(struct ib_cq *ofa_cq, enum ib_cq_notify_flags flags)
{
	struct siw_cq	 *cq  = siw_cq_ofa2siw(ofa_cq);

	dprint(DBG_EH, "(CQ%d:) flags: 0x%8x\n", OBJ_ID(cq), flags);

	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq->notify = SIW_CQ_NOTIFY_SOLICITED;
	else
		cq->notify = SIW_CQ_NOTIFY_ALL;

	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
		return atomic_read(&cq->qlen);

	return 0;
}
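A minimal sketch of the usual arm-then-repoll pattern against this function; the flag combination is the standard verbs idiom, while the surrounding helper is illustrative only:

/* Hypothetical caller: request an event for the next completion and learn
 * whether any CQEs slipped in before the CQ was re-armed.
 */
static void example_arm_cq(struct ib_cq *ofa_cq)
{
	int missed;

	missed = siw_req_notify_cq(ofa_cq,
				   IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (missed > 0)
		pr_info("CQ re-armed with %d completions still pending\n",
			missed);
}

In practice the caller would go back and reap those pending completions (as in the polling sketch above) before sleeping on the notification event.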
Example No. 17
void siw_cq_put(struct siw_cq *cq)
{
	pr_debug(DBG_OBJ "(CQ%d): Old refcount: %d\n",
		OBJ_ID(cq), kref_read(&cq->hdr.ref));
	kref_put(&cq->hdr.ref, siw_free_cq);
}
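The put above is one half of the usual kref pattern; a minimal sketch of the matching get, assuming callers pin the CQ through the same embedded object header (the driver's actual get helper is not part of this excerpt):

/* Hypothetical counterpart to siw_cq_put(): take one reference on the
 * embedded object header before handing the CQ out.
 */
static inline void example_siw_cq_get(struct siw_cq *cq)
{
	kref_get(&cq->hdr.ref);
}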
Example No. 18
/*
 * siw_create_cq()
 *
 * Create CQ of requested size on given device.
 *
 * @ofa_dev:	OFA device contained in siw device
 * @size:	maximum number of CQE's allowed.
 * @ib_context: user context.
 * @udata:	used to provide CQ ID back to user.
 */
static struct ib_cq *do_siw_create_cq(struct ib_device *ofa_dev,
				      const struct ib_cq_init_attr *init_attr,
				      struct ib_ucontext *ib_context,
				      struct ib_udata *udata)
{
	struct siw_ucontext		*ctx;
	struct siw_cq			*cq = NULL;
	struct siw_dev			*sdev = siw_dev_ofa2siw(ofa_dev);
	struct urdma_uresp_create_cq	uresp;
	int rv;

	if (!ofa_dev) {
		pr_warn("NO OFA device\n");
		rv = -ENODEV;
		goto err_out;
	}
	if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
		pr_debug(": Out of CQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (init_attr->cqe < 1) {
		pr_debug(": CQE: %d\n", init_attr->cqe);
		rv = -EINVAL;
		goto err_out;
	}
	cq = kzalloc(sizeof *cq, GFP_KERNEL);
	if (!cq) {
		pr_debug(":  kmalloc\n");
		rv = -ENOMEM;
		goto err_out;
	}
	cq->ofa_cq.cqe = init_attr->cqe;

	if (!ib_context) {
		rv = -EINVAL;
		goto err_out;
	}
	ctx = siw_ctx_ofa2siw(ib_context);

	rv = siw_cq_add(sdev, cq);
	if (rv)
		goto err_out;

	uresp.cq_id = OBJ_ID(cq);

	rv = ib_copy_to_udata(udata, &uresp, sizeof uresp);
	if (rv)
		goto err_out_idr;

	return &cq->ofa_cq;

err_out_idr:
	siw_remove_obj(&sdev->idr_lock, &sdev->cq_idr, &cq->hdr);
err_out:
	pr_debug(DBG_OBJ ": CQ creation failed %d", rv);

	kfree(cq);
	atomic_dec(&sdev->num_cq);

	return ERR_PTR(rv);
}
Example No. 19
static void
pdf_write_outline_dict(PDF *p, int entry)
{
    pdf_outline *outline = &p->outlines[entry];
    pdc_id act_idlist[PDF_MAX_EVENTS];

    /* write action objects */
    if (outline->action)
        pdf_parse_and_write_actionlist(p, event_bookmark, act_idlist,
                                       (const char *) outline->action);

    pdc_begin_obj(p->out, OBJ_ID(entry));   /* outline object */
    pdc_begin_dict(p->out);

    pdc_objref(p->out, "/Parent", OBJ_ID(PARENT(entry)));

    /* outline destination */
    if (outline->dest)
    {
        pdc_puts(p->out, "/Dest");
        pdf_write_destination(p, outline->dest);
    }

    /* write Action entries */
    else if (outline->action)
        pdf_write_action_entries(p, event_bookmark, act_idlist);

    pdc_puts(p->out, "/Title"); /* outline text */
    pdf_put_hypertext(p, outline->text);
    pdc_puts(p->out, "\n");

    if (PREV(entry))
        pdc_objref(p->out, "/Prev", OBJ_ID(PREV(entry)));
    if (NEXT(entry))
        pdc_objref(p->out, "/Next", OBJ_ID(NEXT(entry)));

    if (FIRST(entry)) {
        pdc_objref(p->out, "/First", OBJ_ID(FIRST(entry)));
        pdc_objref(p->out, "/Last", OBJ_ID(LAST(entry)));
    }
    if (COUNT(entry)) {
        if (OPEN(entry))
            pdc_printf(p->out, "/Count %d\n", COUNT(entry));    /* open */
        else
            pdc_printf(p->out, "/Count %d\n", -COUNT(entry));/* closed */
    }

    /* Color */
    if (outline->textcolor[0] != 0.0 ||
        outline->textcolor[1] != 0.0 ||
        outline->textcolor[2] != 0.0)
        pdc_printf(p->out, "/C[%f %f %f]\n", outline->textcolor[0],
                                              outline->textcolor[1],
                                              outline->textcolor[2]);

    /* FontStyle */
    if (outline->fontstyle != fnt_Normal)
    {
        int fontstyle = 0;
        if (outline->fontstyle == fnt_Bold)
            fontstyle = 2;
        if (outline->fontstyle == fnt_Italic)
            fontstyle = 1;
        if (outline->fontstyle == fnt_BoldItalic)
            fontstyle = 3;
        pdc_printf(p->out, "/F %d\n", fontstyle);
    }

    pdc_end_dict(p->out);
    pdc_end_obj(p->out);                        /* outline object */
}
Example No. 20
int
parse (char *cmd)
{
  char p1[2048], p2[2048];
  int  v;

  if (!strncasecmp (cmd, "exit", strlen ("exit")) || cmd[0] == 'q')
    {
      printf ("Exiting...\n");
      return 0;
    }
  else if (!strncasecmp (cmd, "auto", strlen ("auto")))
    {
      auto_eval ^= 1;
      printf ("Auto evaluation: %s\n", auto_eval ? "ON" : "OFF");
    }

  else if (!strncasecmp (cmd, "list", strlen ("list")))
    {
      jones_kb_dump_objects (_kb);
      jones_kb_dump_rules (_kb);
    }
  else if (!strncasecmp (cmd, "load", strlen ("load")))
    {
      char *str;

      str = cmd;
      str += strlen("load") + 1;
      str[strlen(str) - 1] = 0;

      cmd_load (str);

    }
  /* --------------------------------------------------*/ 
  /* XXX: New functions to migrate to the KB interface */
  else if (!strncasecmp (cmd, "lena", strlen ("lena")))
    {
      LENA_EXPR* e;
      cmd[strlen(cmd) - 1] = 0;

      if ((e = jones_kb_add_rule (_kb, cmd + strlen("lena "))))
	{
	  printf ("Rule %s created\n", OBJ_ID(e));
	}
    }
  else if (!strncasecmp (cmd, "set", strlen ("set")))
    {
      sscanf (cmd + strlen ("set "), "%s %s", p1, p2);
      if (p2[0] == 'T' || p2[0] == 't') v = FACT_TRUE;
      else if (p2[0] == 'F' || p2[0] == 'f') v = FACT_FALSE;
      else v = FACT_UNKNOWN;

      jones_kb_add_fact (_kb, p1, v, NULL);
    }
  else if (!strncasecmp (cmd, "run", strlen ("run")))
    {
      jones_kb_run (_kb);
    }
  
  else if (cmd[strlen(cmd) - 2] == '?') // FACT Query
    {
      jones_kb_fact_query (_kb, cmd);
    }
  

  return 1;
}
Example No. 21
static void siw_free_mem(struct kref *ref)
{
	struct siw_mem *m;

	m = container_of(container_of(ref, struct siw_objhdr, ref),
			 struct siw_mem, hdr);

	dprint(DBG_MM|DBG_OBJ, "(MEM%d): Free Object\n", OBJ_ID(m));

	atomic_dec(&m->hdr.sdev->num_mem);

	if (SIW_MEM_IS_MW(m)) {
		struct siw_mw *mw = container_of(m, struct siw_mw, mem);
		kfree(mw);
	} else {
		struct siw_mr *mr = container_of(m, struct siw_mr, mem);
		dprint(DBG_MM|DBG_OBJ, "(MEM%d): Release UMem\n", OBJ_ID(m));
		if (mr->umem)
			siw_umem_release(mr->umem);
		kfree(mr);
	}
}


void siw_cq_put(struct siw_cq *cq)
{
	dprint(DBG_OBJ, "(CQ%d): Old refcount: %d\n",
		OBJ_ID(cq), atomic_read(&cq->hdr.ref.refcount));
	kref_put(&cq->hdr.ref, siw_free_cq);
}

void siw_qp_put(struct siw_qp *qp)
Example No. 22
void siw_pd_put(struct siw_pd *pd)
{
	pr_debug(DBG_OBJ "(PD%d): Old refcount: %d\n",
		OBJ_ID(pd), kref_read(&pd->hdr.ref));
	kref_put(&pd->hdr.ref, siw_free_pd);
}