Example #1
cpumem_handle_event() is an FMD cpumem-agent event handler: AGENT_TODO fault events are repaired automatically or marked for manual repair and then wrapped in a list event; all other events are only logged.
fmd_event_t *
cpumem_handle_event(fmd_t *pfmd, fmd_event_t *e)
{
	int action = 0;
	int ret = 0;
	uint64_t ev_flag;

	ev_flag = e->ev_flag;
	wr_log(CMEA_LOG_DOMAIN, WR_LOG_DEBUG,
		"handle event, class: %s flag: 0x%016llx, handle_mode: 0x%08x",
		e->ev_class, (unsigned long long)ev_flag, e->handle_mode);

	__log_event(e); 
	
	switch (ev_flag) {
	case AGENT_TODO:
		wr_log(CMEA_LOG_DOMAIN, WR_LOG_DEBUG, 
			"cpumem agent handle fault event.");

		if (e->handle_mode & EVENT_HANDLE_MODE_AUTO)
			ret = __handle_fault_event(e);
		/* add other handle modes here, e.g. user-defined. */
		if (e->handle_mode == EVENT_HANDLE_MODE_MANUAL)
			ret = LIST_REPAIRED_MANUAL;
		
		e->event_type = EVENT_LIST;
		__log_event(e);
		action = 1;
		break;
	}

	if (!action) {
		wr_log(CMEA_LOG_DOMAIN, WR_LOG_DEBUG, 
			"cpumem agent log event.");
		return NULL;
	}
	
	wr_log(CMEA_LOG_DOMAIN, WR_LOG_DEBUG,
			"handle event result: %08x", 
			ret);
	
	return (fmd_event_t *)fmd_create_listevent(e, ret);
}
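
For readers outside the fmd tree, here is a stripped-down, self-contained analog of the dispatch shape above. The names (struct toy_event, handle_event(), the MODE_* macros) are invented for illustration and are not the fmd API; the point is the contract that a NULL return means the event was only logged, while a non-NULL return carries the repair result.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define TOY_EVENT_TODO 0x1u   /* stands in for AGENT_TODO */
#define MODE_AUTO      0x1u
#define MODE_MANUAL    0x2u

/* Hypothetical event type standing in for fmd_event_t. */
struct toy_event {
	uint64_t flag;        /* what kind of event this is */
	uint32_t handle_mode; /* how it should be handled */
};

struct toy_result {
	int repaired_manually;
};

/* Returns NULL when the event is only logged, or a result object when the
 * agent acted on it, mirroring the contract of cpumem_handle_event(). */
static struct toy_result *handle_event(const struct toy_event *e)
{
	struct toy_result *res;

	if (e->flag != TOY_EVENT_TODO)   /* not a fault we act on: log only */
		return NULL;

	res = calloc(1, sizeof(*res));
	if (!res)
		return NULL;
	if (e->handle_mode & MODE_MANUAL)
		res->repaired_manually = 1;
	return res;
}

int main(void)
{
	struct toy_event ev = { .flag = TOY_EVENT_TODO, .handle_mode = MODE_MANUAL };
	struct toy_result *r = handle_event(&ev);

	printf("%s\n", r ? (r->repaired_manually ? "manual repair queued"
						  : "auto repaired")
			 : "logged only");
	free(r);
	return 0;
}

The main() call shows the intended use: the caller treats NULL as "nothing to do beyond logging" and frees the result otherwise.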
Example #2
__log_state() appends a timestamped snapshot of a dsscomp composition's state to a bounded in-memory debug log and emits a trace entry; it compiles to nothing unless CONFIG_DSSCOMP_DEBUG_LOG is set.
static inline void __log_state(dsscomp_t c, void *fn, u32 ev)
{
#ifdef CONFIG_DSSCOMP_DEBUG_LOG
	if (c->dbg_used < ARRAY_SIZE(c->dbg_log)) {
		u32 t = (u32) ktime_to_ms(ktime_get());
		c->dbg_log[c->dbg_used].t = t;
		c->dbg_log[c->dbg_used++].state = c->state;
		__log_event(20 * c->ix + 20, t, c, ev ? "%pf on %s" : "%pf",
				(u32) fn, (u32) log_status_str(ev));
	}
#endif
}
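
The same bounded in-memory log idea is handy outside the kernel as well. Below is a minimal userspace sketch in plain C; struct comp, log_state() and now_ms() are invented stand-ins for the dsscomp structures, and, like __log_state(), it simply stops recording once the fixed-size array is full.

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define DBG_LOG_SIZE 128
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct dbg_entry {
	uint32_t t;      /* timestamp in milliseconds */
	uint32_t state;  /* state value at the time of the snapshot */
};

/* Toy stand-in for the dsscomp composition object. */
struct comp {
	uint32_t state;
	unsigned int dbg_used;
	struct dbg_entry dbg_log[DBG_LOG_SIZE];
};

static uint32_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint32_t)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000);
}

/* Record a state snapshot; stop recording once the log is full,
 * exactly like the bounds check in __log_state(). */
static void log_state(struct comp *c)
{
	if (c->dbg_used < ARRAY_SIZE(c->dbg_log)) {
		c->dbg_log[c->dbg_used].t = now_ms();
		c->dbg_log[c->dbg_used++].state = c->state;
	}
}

int main(void)
{
	struct comp c = { .state = 1 };

	log_state(&c);
	c.state = 2;
	log_state(&c);

	for (unsigned int i = 0; i < c.dbg_used; i++)
		printf("%u ms: state=%u\n",
		       (unsigned)c.dbg_log[i].t, (unsigned)c.dbg_log[i].state);
	return 0;
}

Keeping the earliest entries and dropping later ones once the buffer fills mirrors the behaviour of the snippet above; a wrap-around ring would instead keep the most recent history.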
Example #3
p9_xos_read_work() drains the receive deque of a shared-memory 9P transport: responses are matched by tag to pending client requests, incoming requests are queued on the target device, and wake-lock bookkeeping decides when the transport may let the system sleep.
static void p9_xos_read_work(struct work_struct *work)
{
	struct p9_xos_driver *drv;
	struct p9_xos_endpoint *ep;
	int n;
	unsigned long flags;

	prolog("w=%p", work);

	drv = container_of(work, struct p9_xos_driver, rwork);
	ep = &drv->ep[RD_EP];

	drv->wake_status = 1;

	spin_lock_irqsave(&drv->ep_lock, flags);
	n = p9_xos_deque_pop(ep->lqueue, ep);
	spin_unlock_irqrestore(&drv->ep_lock, flags);
	if (n == deque_null)
		goto done;

	do {
		u16 tag;
		int id;
		unsigned int size;
		struct p9_xos_device *device;
		struct p9_req_t *req;
		u8 *ptr;
		u8 type;

		ptr = n2a(n, ep) + 4;

		id = *(int *)ptr;
		ptr += 4;

		size = le32_to_cpu(*(__le32 *) ptr);
		if (size < 7) {
			critical("ignoring too short request");
			break;
		}

		type = *(ptr + 4);

		__log_event(drv, id, type, RD_EP);

		device = &drv->device[id];

		if (type & 1) {
			if (size >= device->client->msize) {
				warning("requested packet size too big: %d\n",
					size);
				goto ignore;
			}
			tag = le16_to_cpu(*(__le16 *) (ptr + 5));
			req = p9_tag_lookup(device->client, tag);

			if (req == NULL) {
				warning("ignoring unexpected response");
				goto ignore;
			}

			BUG_ON(!req->rc);

			if (likely(req->aio_cb != NULL)) {
				req->rc->sdata = ptr;
				req->status = REQ_STATUS_RCVD;
				p9_client_notify_aio(device->client, req);
			} else {
				req->rc->sdata =
				    (char *)req->rc + sizeof(*req->rc);
				memcpy(req->rc->sdata, ptr, size);
				p9_client_cb(device->client, req);
			}
ignore:
			spin_lock_irqsave(&drv->ep_lock, flags);
			p9_xos_deque_push(ep->rqueue, n, ep);
			nb_free_packets++;
			spin_unlock_irqrestore(&drv->ep_lock, flags);
		} else {
			/*
			 * Dirty hack for the pmu_int server:
			 *   pmu_int is on channel 1 and its client always
			 *   has a request pending, so do not hold the wake
			 *   lock when only a pmu_int request is pending.
			 */
			if (likely(device != &drv->device[1]))
				drv->wake_count++;

			if (unlikely(!device->open)) {
				warning("DEVICE %d NOT OPENED, ignoring req",
					device->id);
				goto ignore2;
			}
			req = kmem_cache_alloc(drv->cache, GFP_KERNEL);
			req->tc = kmalloc(sizeof(struct p9_fcall), GFP_KERNEL);
			req->tc->size = size;
			req->tc->sdata = ptr;
			req->aux = device;

			spin_lock(&device->lock);
			list_add_tail(&req->req_list, &device->req_list);
			spin_unlock(&device->lock);

			if (device->rd_cb)
				device->rd_cb(device, req);
		}
ignore2:
		spin_lock_irqsave(&drv->ep_lock, flags);
		n = p9_xos_deque_pop(ep->lqueue, ep);
		spin_unlock_irqrestore(&drv->ep_lock, flags);
	} while (n != deque_null);

done:
	if ((!drv->wake_count) && (drv->wake_status == 1)) {
		drv->wake_status = 0;
		wake_unlock(&drv->wake_lock);
		wmb();
		if (drv->wake_status == 2)
			wake_lock(&drv->wake_lock);
	}
	epilog();
}
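
The loop above follows a generic shape: pop one buffer under the endpoint lock, process it with the lock dropped, recycle it, and repeat until the queue is empty. A minimal userspace analog using a pthread mutex and a toy linked-list queue (not the p9_xos deque API) might look like this:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int payload;
};

struct queue {
	pthread_mutex_t lock;
	struct node *head;
};

/* Pop one node under the lock; return NULL once the queue is empty. */
static struct node *queue_pop(struct queue *q)
{
	struct node *n;

	pthread_mutex_lock(&q->lock);
	n = q->head;
	if (n)
		q->head = n->next;
	pthread_mutex_unlock(&q->lock);
	return n;
}

static void queue_push(struct queue *q, struct node *n)
{
	pthread_mutex_lock(&q->lock);
	n->next = q->head;
	q->head = n;
	pthread_mutex_unlock(&q->lock);
}

/* Drain the queue the way p9_xos_read_work() drains its deque:
 * take one element under the lock, process it unlocked, recycle it. */
static void read_work(struct queue *pending, struct queue *freelist)
{
	struct node *n;

	while ((n = queue_pop(pending)) != NULL) {
		printf("processing payload %d\n", n->payload); /* done unlocked */
		queue_push(freelist, n);
	}
}

int main(void)
{
	struct queue pending = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct queue freelist = { PTHREAD_MUTEX_INITIALIZER, NULL };

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->payload = i;
		queue_push(&pending, n);
	}
	read_work(&pending, &freelist);
	return 0;
}

The queue here is LIFO, which is irrelevant to the point being illustrated: the lock is held only for the pointer manipulation, never while the payload is processed, exactly as p9_xos_read_work() keeps ep_lock out of the request handling.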
Example #4
p9_xos_write_work() is the matching transmit path: it pops free slots from the write deque, copies pending request or response payloads into them, drops the wake-lock reference taken when the corresponding request arrived, and hands the filled slots back to the remote end.
static void p9_xos_write_work(struct work_struct *work)
{
	struct p9_xos_driver *drv;
	struct p9_xos_endpoint *ep;
	unsigned long flags;

	prolog("w=%p", work);

	drv = container_of(work, struct p9_xos_driver, wwork);
	ep = &drv->ep[WR_EP];

	spin_lock(&drv->q_lock);
	if (list_empty(&drv->req_list)) {
		clear_bit(WE_BIT, &drv->state);
		spin_unlock(&drv->q_lock);
		goto done;
	}
	spin_unlock(&drv->q_lock);

	do {
		u8 *ptr;
		struct p9_req_t *req;
		struct p9_xos_device *device;
		int n;

		req = list_first_entry(&drv->req_list, struct p9_req_t,
				       req_list);

		spin_lock_irqsave(&drv->ep_lock, flags);
		n = p9_xos_deque_pop(ep->lqueue, ep);
		spin_unlock_irqrestore(&drv->ep_lock, flags);
		if (n == deque_null) {
			ep->regs[STARVATION] = 1;
			break;
		}
		ptr = n2a(n, ep) + 4;

		device = req->aux;
		spin_lock(&drv->q_lock);
		list_del(&req->req_list);
		req->status = REQ_STATUS_SENT;
		spin_unlock(&drv->q_lock);

		*(unsigned int *)ptr = device->id;
		ptr += 4;

		if (req->tc) {
			memcpy(ptr, req->tc->sdata, req->tc->size);
		} else {
			memcpy(ptr, req->rc->sdata, req->rc->size);
			spin_lock(&device->lock);
			BUG_ON(!device->ack_count);
			device->ack_count -= 1;

			/*
			 * Dirty hack for the pmu_int server:
			 *   pmu_int is on channel 1 and its client always
			 *   has a request pending, so do not hold the wake
			 *   lock when only a pmu_int request is pending.
			 */
			if (likely(device != &drv->device[1]))
				drv->wake_count--;

			if (device->wr_cb)
				device->wr_cb(device, req);
			kmem_cache_free(drv->cache, req);
			spin_unlock(&device->lock);
		}
		__log_event(drv, device->id, ptr[4], WR_EP);

		spin_lock_irqsave(&drv->ep_lock, flags);
		p9_xos_deque_push(ep->rqueue, n, ep);
		spin_unlock_irqrestore(&drv->ep_lock, flags);

	} while (!list_empty(&drv->req_list));

	queue_work(drv->workqueue, &drv->swork);

done:
	clear_bit(WE_BIT, &drv->state);

	epilog();
}
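
The WE_BIT handling at the end is the usual "work already scheduled" flag: the worker clears it once the request list has drained, and the submit side (not shown in this excerpt) presumably sets it before calling queue_work() so the work item is not queued twice. A single-threaded C11 sketch of that handshake, with invented names, might look like this:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Plays the role of WE_BIT in drv->state: set while write work is
 * scheduled, so submitters do not queue the worker twice. */
static atomic_bool write_scheduled;
static int pending_requests;            /* stands in for drv->req_list */

static void schedule_write_work(void);

/* Called by whoever queues a new request to send. */
static void submit_request(void)
{
	pending_requests++;
	/* Schedule the worker only if it is not already scheduled. */
	if (!atomic_exchange(&write_scheduled, true))
		schedule_write_work();
}

/* The worker drains everything it can see and then clears the flag,
 * as p9_xos_write_work() does with clear_bit(WE_BIT, &drv->state). */
static void write_work(void)
{
	while (pending_requests > 0) {
		printf("sending one request\n");
		pending_requests--;
	}
	atomic_store(&write_scheduled, false);
}

static void schedule_write_work(void)
{
	/* A real transport would hand this off to a work queue;
	 * the sketch just runs the worker inline. */
	write_work();
}

int main(void)
{
	submit_request();
	submit_request();
	return 0;
}

Because the sketch runs the worker inline, every submit drains immediately; in the driver the work item runs asynchronously, and the flag is what suppresses redundant queue_work() calls while a drain is still pending.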