Пример #1
0
static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
	struct i915_mmu_object *mo = obj->userptr.mmu_object;

	/*
	 * An mm_invalidate_range callback must be able to cancel any
	 * userptr overlapping the invalidated range, but doing that needs
	 * the struct_mutex and therefore risks recursion: a user can alias
	 * the userptr address space with a GTT mmapping (via MAP_FIXED),
	 * in which case invalidating that mmapping invokes
	 * mm_invalidate_range on the userptr address while struct_mutex is
	 * already held. To avoid that, validity of this object is tracked
	 * by a flag flipped under the i915_mmu_notifier spinlock instead.
	 */
	if (mo == NULL)
		return;

	spin_lock(&mo->mn->lock);
	if (!value)
		del_object(mo);
	else
		add_object(mo);
	spin_unlock(&mo->mn->lock);
}
Пример #2
0
static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* An mm_invalidate_range callback must be able to cancel any
	 * userptr overlapping the invalidated range, but doing that needs
	 * the struct_mutex and therefore risks recursion: a user can alias
	 * the userptr address space with a GTT mmapping (via MAP_FIXED),
	 * in which case invalidating that mmapping invokes
	 * mm_invalidate_range on the userptr address while struct_mutex is
	 * already held. To avoid that, validity of this object is tracked
	 * by a flag flipped under the i915_mmu_notifier spinlock instead.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	struct i915_mmu_object *mo = obj->userptr.mmu_object;

	if (mo == NULL)
		return 0;

	spin_lock(&mo->mn->lock);
	if (!value) {
		del_object(mo);
	} else if (work_pending(&mo->work)) {
		/* In order to serialise get_pages with an outstanding
		 * cancel_userptr, we must drop the struct_mutex and try again.
		 */
		ret = -EAGAIN;
	} else {
		add_object(mo);
	}
	spin_unlock(&mo->mn->lock);
#endif

	return ret;
}
Пример #3
0
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	/* Atomically claim the mmu_object; the object no longer points at it. */
	struct i915_mmu_object *mo = fetch_and_zero(&obj->userptr.mmu_object);

	if (!mo)
		return;

	/* Unlink from the notifier's interval tree under its lock, then free. */
	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);

	kfree(mo);
}
Пример #4
0
/*
 * MMU-notifier callback: cancel every userptr object whose interval-tree
 * range overlaps [start, end). Returns 0 on success, or -EAGAIN when work
 * would be required but the caller asked for a non-blocking invalidate.
 */
static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end,
						       bool blockable)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);

	/* Fast path: no tracked objects, nothing can overlap. */
	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, start, end);
	while (it) {
		/* There is overlapping work to do; bail if we may not block. */
		if (!blockable) {
			spin_unlock(&mn->lock);
			return -EAGAIN;
		}
		/* The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		/* Even objects already being destroyed are collected here so
		 * they get unlinked from the tree below.
		 */
		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, start, end);
	}
	/* Unlink all overlapping objects while still holding mn->lock. */
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	/* Wait for the queued cancellations to complete before returning. */
	if (!list_empty(&cancelled))
		flush_workqueue(mn->wq);

	return 0;
}
Пример #5
0
/*
 * Detach and free the object's mmu_object on release.
 *
 * Fix: clear obj->userptr.mmu_object BEFORE freeing it. The original
 * freed mo first and cleared the pointer afterwards, leaving a window
 * in which obj->userptr.mmu_object referred to freed memory.
 */
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	/* Drop the object's reference before tearing mo down. */
	obj->userptr.mmu_object = NULL;

	/* Unlink from the notifier's tracking under its lock, then free. */
	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);
}
Пример #6
0
/*-------------------------------------------------------------------------*\
 * Method: n.delete(self)
 * Delete a node. All edges incident on this node are deleted first,
 * and their Lua userdata proxies (if any) are closed via gr_delete_edge.
 * Returns non-nil (the number 0) on success.
 * Example:
 * rv, err = n:delete(h)
\*-------------------------------------------------------------------------*/
static int gr_delete(lua_State *L)
{
  Agraph_t *g;
  gr_node_t *ud = tonode(L, 1, NONSTRICT);
  if (ud->n != NULL){
    /* Delete all associated edges with tail on this node */
    Agedge_t *se, *ne;
    int ix;
    g = ud->n->graph;
    /* Fetch the next edge before deleting the current one, since
     * deletion invalidates the iterator. */
    se = agfstedge(g, ud->n);
    while (se){
      ne = agnxtedge(g, se, ud->n);
      /* get_object() apparently pushes ix values and returns their count;
       * -ix is then the edge proxy (or nil) - TODO confirm against its
       * definition. */
      ix = get_object(L, se);         /* ud, se */
      if (!(lua_isnil(L, -ix))){
	TRACE("n:delete(): closing subedge: ud=%p 'edge@%d' id=0x%0x e=%p\n", 
	       (void *) lua_touserdata(L, -ix), AGID(se), AGID(se), (void *)se);
	/* Close the edge proxy through its own delete method. */
	lua_pushcfunction(L, gr_delete_edge); /* ud, se, func */
	lua_pushvalue(L, -2);                 /* ud, se, func, se */
	lua_call(L, 1, LUA_MULTRET);          /* ud, se */
	lua_pop(L, 1);                        /* ud */
      } else {
	lua_pop(L, 2);                        /* ud */
      }
      se = ne;
    }
    TRACE("n:delete(): ud=%p '%s' id=0x%0x \n", 
	  (void *) ud, agnameof(ud->n), AGID(ud->n));
    /* Drop the registry bookkeeping, then delete the node itself. */
    del_object(L, ud->n);
    agdelete(g, ud->n);
    ud->n = NULL;
    if (ud->name){
      free(ud->name);
      ud->name = NULL;
    }
  } else {
    TRACE("n:delete(): ud=%p already closed\n", (void *)ud);
  }
  lua_pushnumber(L, 0);
  return 1;
}
Пример #7
0
/*-------------------------------------------------------------------------*\
 * Method: e.delete(self)
 * Delete an edge.
 * Returns non-nil on success: 0 when the edge was deleted, -1 when the
 * userdata was already closed.
 * Example:
 * rv, err = e:delete()
\*-------------------------------------------------------------------------*/
static int gr_delete(lua_State *L)
{
  int rv = -1;
  gr_edge_t *ud = toedge(L, 1, NONSTRICT);
  Agraph_t *g;
  if (ud->e != NULL){
    g = ud->e->tail->graph;
    TRACE("e.delete(): edge: ud=%p '%s' id=0x%x e=%p\n", 
	   (void *) ud, ud->name, AGID(ud->e), (void *)ud->e);
    /* Drop the registry bookkeeping, then delete the edge itself. */
    del_object(L, ud->e);
    agdelete(g, ud->e);
    ud->e = NULL;
    if (ud->name){
      free(ud->name);
      ud->name = NULL;
    }
    /* Fix: report success (0), matching the node variant. Previously rv
     * stayed -1 on every path, so success was indistinguishable from an
     * already-closed edge. */
    rv = 0;
  } else {
    TRACE("e:delete(): ud=%p already closed\n", (void *)ud);
  }
  lua_pushnumber(L, rv);
  return 1;
}
Пример #8
0
    /**
     * Dispatch an incoming packet to the handler matching its pcode.
     *
     * On handler failure (ret != TFS_SUCCESS) an error reply is sent back
     * to the client; the framework return value is always EASY_OK so the
     * caller frees the packet normally.
     */
    int KvMetaService::handle(common::BasePacket* packet)
    {
      assert(NULL != packet);
      int ret = TFS_SUCCESS;
      int32_t pcode = packet->getPCode();
      if (!is_inited_)
      {
        // Refuse all traffic until the service has been initialised.
        ret = EXIT_NOT_INIT_ERROR;
      }
      else
      {
        // One case per message type; each handler receives the packet
        // downcast to its concrete message class.
        switch (pcode)
        {
          case REQ_KVMETA_GET_SERVICE_MESSAGE:
            ret = get_service(dynamic_cast<ReqKvMetaGetServiceMessage*>(packet));
            break;
          case REQ_KVMETA_PUT_OBJECT_MESSAGE:
            ret = put_object(dynamic_cast<ReqKvMetaPutObjectMessage*>(packet));
            break;
          case REQ_KVMETA_GET_OBJECT_MESSAGE:
            ret = get_object(dynamic_cast<ReqKvMetaGetObjectMessage*>(packet));
            break;
          case REQ_KVMETA_DEL_OBJECT_MESSAGE:
            ret = del_object(dynamic_cast<ReqKvMetaDelObjectMessage*>(packet));
            break;
          case REQ_KVMETA_HEAD_OBJECT_MESSAGE:
            ret = head_object(dynamic_cast<ReqKvMetaHeadObjectMessage*>(packet));
            break;
          case REQ_KVMETA_PUT_OBJECT_USER_METADATA_MESSAGE:
            ret = put_object_user_metadata(dynamic_cast<ReqKvMetaPutObjectUserMetadataMessage*>(packet));
            break;
          case REQ_KVMETA_DEL_OBJECT_USER_METADATA_MESSAGE:
            ret = del_object_user_metadata(dynamic_cast<ReqKvMetaDelObjectUserMetadataMessage*>(packet));
            break;
          case REQ_KVMETA_PUT_BUCKET_MESSAGE:
            ret = put_bucket(dynamic_cast<ReqKvMetaPutBucketMessage*>(packet));
            break;
          case REQ_KVMETA_GET_BUCKET_MESSAGE:
            ret = get_bucket(dynamic_cast<ReqKvMetaGetBucketMessage*>(packet));
            break;
          case REQ_KVMETA_DEL_BUCKET_MESSAGE:
            ret = del_bucket(dynamic_cast<ReqKvMetaDelBucketMessage*>(packet));
            break;
          case REQ_KVMETA_HEAD_BUCKET_MESSAGE:
            ret = head_bucket(dynamic_cast<ReqKvMetaHeadBucketMessage*>(packet));
            break;
          case REQ_KVMETA_SET_LIFE_CYCLE_MESSAGE:
            ret = set_file_lifecycle(dynamic_cast<ReqKvMetaSetLifeCycleMessage*>(packet));
            break;
          case REQ_KVMETA_GET_LIFE_CYCLE_MESSAGE:
            ret = get_file_lifecycle(dynamic_cast<ReqKvMetaGetLifeCycleMessage*>(packet));
            break;
          case REQ_KVMETA_RM_LIFE_CYCLE_MESSAGE:
            ret = rm_file_lifecycle(dynamic_cast<ReqKvMetaRmLifeCycleMessage*>(packet));
            break;
          case REQ_KVMETA_PUT_BUCKET_ACL_MESSAGE:
            ret = put_bucket_acl(dynamic_cast<ReqKvMetaPutBucketAclMessage*>(packet));
            break;
          case REQ_KVMETA_GET_BUCKET_ACL_MESSAGE:
            ret = get_bucket_acl(dynamic_cast<ReqKvMetaGetBucketAclMessage*>(packet));
            break;
          default:
            ret = EXIT_UNKNOWN_MSGTYPE;
            TBSYS_LOG(ERROR, "unknown msg type: %d", pcode);
            break;
        }
      }

      // Report failures back to the requester; the transport result is
      // still EASY_OK regardless.
      if (ret != TFS_SUCCESS)
      {
        packet->reply_error_packet(TBSYS_LOG_LEVEL(INFO), ret, "execute message failed");
      }

      return EASY_OK;
    }
Пример #9
0
    /**
     * Queue-worker entry point: run the base-class handling first, then
     * dispatch the packet to the handler matching its pcode.
     *
     * NOTE(review): `ret` is declared int but initialised with `true` and
     * later holds handler error codes that are compared against
     * TFS_SUCCESS - this int/bool mixing looks fragile; confirm that
     * TFS_SUCCESS == 0 and that handlers never return 1 for failure.
     * Always returns true so the caller frees the packet.
     */
    bool KvMetaService::handlePacketQueue(tbnet::Packet *packet, void *args)
    {
      int ret = true;
      BasePacket* base_packet = NULL;
      if (!(ret = BaseService::handlePacketQueue(packet, args)))
      {
        TBSYS_LOG(ERROR, "call BaseService::handlePacketQueue fail. ret: %d", ret);
      }
      else
      {
        // One case per message type; each handler receives the packet
        // downcast to its concrete message class.
        base_packet = dynamic_cast<BasePacket*>(packet);
        switch (base_packet->getPCode())
        {
          case REQ_KVMETA_GET_SERVICE_MESSAGE:
            ret = get_service(dynamic_cast<ReqKvMetaGetServiceMessage*>(base_packet));
            break;
          case REQ_KVMETA_PUT_OBJECT_MESSAGE:
            ret = put_object(dynamic_cast<ReqKvMetaPutObjectMessage*>(base_packet));
            break;
          case REQ_KVMETA_GET_OBJECT_MESSAGE:
            ret = get_object(dynamic_cast<ReqKvMetaGetObjectMessage*>(base_packet));
            break;
          case REQ_KVMETA_DEL_OBJECT_MESSAGE:
            ret = del_object(dynamic_cast<ReqKvMetaDelObjectMessage*>(base_packet));
            break;
          case REQ_KVMETA_HEAD_OBJECT_MESSAGE:
            ret = head_object(dynamic_cast<ReqKvMetaHeadObjectMessage*>(base_packet));
            break;
          case REQ_KVMETA_PUT_OBJECT_USER_METADATA_MESSAGE:
            ret = put_object_user_metadata(dynamic_cast<ReqKvMetaPutObjectUserMetadataMessage*>(base_packet));
            break;
          case REQ_KVMETA_DEL_OBJECT_USER_METADATA_MESSAGE:
            ret = del_object_user_metadata(dynamic_cast<ReqKvMetaDelObjectUserMetadataMessage*>(base_packet));
            break;
          case REQ_KVMETA_PUT_BUCKET_MESSAGE:
            ret = put_bucket(dynamic_cast<ReqKvMetaPutBucketMessage*>(base_packet));
            break;
          case REQ_KVMETA_GET_BUCKET_MESSAGE:
            ret = get_bucket(dynamic_cast<ReqKvMetaGetBucketMessage*>(base_packet));
            break;
          case REQ_KVMETA_DEL_BUCKET_MESSAGE:
            ret = del_bucket(dynamic_cast<ReqKvMetaDelBucketMessage*>(base_packet));
            break;
          case REQ_KVMETA_HEAD_BUCKET_MESSAGE:
            ret = head_bucket(dynamic_cast<ReqKvMetaHeadBucketMessage*>(base_packet));
            break;
          case REQ_KVMETA_SET_LIFE_CYCLE_MESSAGE:
            ret = set_file_lifecycle(dynamic_cast<ReqKvMetaSetLifeCycleMessage*>(base_packet));
            break;
          case REQ_KVMETA_GET_LIFE_CYCLE_MESSAGE:
            ret = get_file_lifecycle(dynamic_cast<ReqKvMetaGetLifeCycleMessage*>(base_packet));
            break;
          case REQ_KVMETA_RM_LIFE_CYCLE_MESSAGE:
            ret = rm_file_lifecycle(dynamic_cast<ReqKvMetaRmLifeCycleMessage*>(base_packet));
            break;
          case REQ_KVMETA_PUT_BUCKET_ACL_MESSAGE:
            ret = put_bucket_acl(dynamic_cast<ReqKvMetaPutBucketAclMessage*>(base_packet));
            break;
          case REQ_KVMETA_GET_BUCKET_ACL_MESSAGE:
            ret = get_bucket_acl(dynamic_cast<ReqKvMetaGetBucketAclMessage*>(base_packet));
            break;
          default:
            ret = EXIT_UNKNOWN_MSGTYPE;
            TBSYS_LOG(ERROR, "unknown msg type: %d", base_packet->getPCode());
            break;
        }
      }

      // Report handler failures back to the requester when the downcast
      // succeeded; the queue result is still true regardless.
      if (ret != TFS_SUCCESS && NULL != base_packet)
      {
        base_packet->reply_error_packet(TBSYS_LOG_LEVEL(INFO), ret, "execute message failed");
      }

      // always return true. packet will be freed by caller
      return true;
    }