Example no. 1
0
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;
	unsigned long flags;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	read_lock_irqsave(&tasklist_lock, flags);

	do_each_thread(g, p) {
		if (!p->on_rq || task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	} while_each_thread(g, p);

	read_unlock_irqrestore(&tasklist_lock, flags);
}
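Every example in this listing pairs read_lock_irqsave() with read_unlock_irqrestore(). For reference, a minimal sketch of that reader-side pattern might look like the following; my_lock, my_list, struct my_entry and my_find_value() are hypothetical names used only for illustration and do not appear in any of the examples.

#include <linux/spinlock.h>
#include <linux/list.h>

struct my_entry {
	struct list_head node;
	int value;
};

static DEFINE_RWLOCK(my_lock);		/* hypothetical reader/writer lock */
static LIST_HEAD(my_list);		/* hypothetical list protected by my_lock */

static int my_find_value(int wanted)
{
	struct my_entry *e;
	unsigned long flags;
	int found = 0;

	/* Take the read side and disable local interrupts, saving their state. */
	read_lock_irqsave(&my_lock, flags);
	list_for_each_entry(e, &my_list, node) {
		if (e->value == wanted) {
			found = 1;
			break;
		}
	}
	/* Drop the read side and restore the saved interrupt state. */
	read_unlock_irqrestore(&my_lock, flags);

	return found;
}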
Example no. 2
0
/*
 * This version of gettimeofday has near microsecond resolution.
 */
void do_gettimeofday(struct timeval *tv)
{
    extern volatile unsigned long wall_jiffies;
    unsigned long flags;
    unsigned long usec, sec, lost;

    read_lock_irqsave(&xtime_lock, flags);
    usec = 0;
    lost = jiffies - wall_jiffies;
    if (lost)
        usec += lost * (1000000/HZ);
    sec = xtime.tv_sec;
    usec += xtime.tv_usec;
    read_unlock_irqrestore(&xtime_lock, flags);

    while (usec >= 1000000) {
        usec -= 1000000;
        sec++;
    }

    tv->tv_sec = sec;
    tv->tv_usec = usec;
}
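A hedged sketch of a caller of the interface above; the surrounding function and log message are hypothetical, while do_gettimeofday() and struct timeval are taken from the example itself.

/* Hypothetical caller: read the wall-clock time and log it. */
static void report_wall_time(void)
{
	struct timeval tv;

	do_gettimeofday(&tv);
	printk(KERN_INFO "wall time: %lu.%06lu\n",
	       (unsigned long)tv.tv_sec, (unsigned long)tv.tv_usec);
}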
Example no. 3
0
/*
 * Get Home Agent Address for an interface
 */
int mipv6_ha_get_addr(int ifindex, struct in6_addr *addr)
{
	unsigned long flags;
	struct getaddr_iterator_args args;
	struct net_device *dev;

	if (ifindex <= 0)
		return -1;

	if ((dev = dev_get_by_index(ifindex)) == NULL)
		return -1;

	memset(addr, 0, sizeof(struct in6_addr));
	args.dev = dev;
	args.addr = addr;
	read_lock_irqsave(&home_agents->lock, flags);
	hashlist_iterate(home_agents->entries, &args, getaddr_iterator);
#ifdef CONFIG_IPV6_MOBILITY_DEBUG
	printk(KERN_INFO "%s: interface = %s\n", __FUNCTION__, dev->name);
	printk(KERN_INFO "%s: home agent = %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", __FUNCTION__,
		ntohs(args.addr->s6_addr16[0]),
		ntohs(args.addr->s6_addr16[1]),
		ntohs(args.addr->s6_addr16[2]),
		ntohs(args.addr->s6_addr16[3]),
		ntohs(args.addr->s6_addr16[4]),
		ntohs(args.addr->s6_addr16[5]),
		ntohs(args.addr->s6_addr16[6]),
		ntohs(args.addr->s6_addr16[7]));
#endif
	read_unlock_irqrestore(&home_agents->lock, flags);
	dev_put(dev);

	if (ipv6_addr_any(addr))
		return -1;
	
	return 0;
}
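A possible caller of mipv6_ha_get_addr(), sketched using only the signature and return convention shown above; the surrounding function and messages are hypothetical.

/* Hypothetical caller: look up the home agent address for an interface
 * index and report whether one was found. */
static void show_home_agent(int ifindex)
{
	struct in6_addr ha;

	if (mipv6_ha_get_addr(ifindex, &ha) < 0) {
		printk(KERN_INFO "no home agent on ifindex %d\n", ifindex);
		return;
	}
	printk(KERN_INFO "home agent found on ifindex %d\n", ifindex);
}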
Example no. 4
0
/**
 * rdma_query_gid - Read the GID content from the GID software cache
 * @device:		Device to query the GID
 * @port_num:		Port number of the device
 * @index:		Index of the GID table entry to read
 * @gid:		Pointer to GID where to store the entry's GID
 *
 * rdma_query_gid() only reads the GID entry content for requested device,
 * port and index. It reads for IB, RoCE and iWarp link layers.  It doesn't
 * hold any reference to the GID table entry in the HCA or software cache.
 *
 * Returns 0 on success or appropriate error code.
 *
 */
int rdma_query_gid(struct ib_device *device, u8 port_num,
		   int index, union ib_gid *gid)
{
	struct ib_gid_table *table;
	unsigned long flags;
	int res = -EINVAL;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	table = rdma_gid_table(device, port_num);
	read_lock_irqsave(&table->rwlock, flags);

	if (index < 0 || index >= table->sz ||
	    !is_gid_entry_valid(table->data_vec[index]))
		goto done;

	memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
	res = 0;

done:
	read_unlock_irqrestore(&table->rwlock, flags);
	return res;
}
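A hedged usage sketch for rdma_query_gid() as documented above; the device pointer is assumed to come from elsewhere, and port 1 / index 0 are placeholder values.

/* Illustrative only: read GID index 0 of port 1 into the caller's buffer. */
static int read_first_gid(struct ib_device *device, union ib_gid *gid)
{
	int ret;

	ret = rdma_query_gid(device, 1, 0, gid);
	if (ret)
		pr_err("rdma_query_gid failed: %d\n", ret);
	return ret;
}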
Example no. 5
0
/**
 * mipv6_bcache_get - get entry from Binding Cache
 * @home_addr: address to search
 * @entry: pointer to buffer
 *
 * Gets a copy of the Binding Cache entry for @home_addr.  The entry's
 * @last_used field is updated.  If the entry exists, it is copied to
 * @entry and zero is returned; otherwise a negative value is returned.
 **/
int mipv6_bcache_get(
	struct in6_addr *home_addr, 
	struct mipv6_bcache_entry *entry)
{
	unsigned long flags;
	struct mipv6_bcache_entry *entry2;

	DEBUG_FUNC();
  
	if (home_addr == NULL || entry == NULL) 
		return -1;

	read_lock_irqsave(&bcache->lock, flags);

	entry2 = (struct mipv6_bcache_entry *) 
		hashlist_get(bcache->entries, home_addr);
	if (entry2 != NULL) {
		entry2->last_used = jiffies;
		memcpy(entry, entry2, sizeof(struct mipv6_bcache_entry));
	}

	read_unlock_irqrestore(&bcache->lock, flags);
	return (entry2 == NULL)? -1 : 0;
}
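A sketch of how a caller might consume mipv6_bcache_get(), assuming only the signature and the copy-out semantics described in the kernel-doc above; the caller function itself is hypothetical.

/* Hypothetical caller: fetch the cached binding for home_addr into a
 * private stack copy; the cache lock is taken and released inside the call. */
static int lookup_binding(struct in6_addr *home_addr)
{
	struct mipv6_bcache_entry entry;

	if (mipv6_bcache_get(home_addr, &entry) < 0)
		return -1;	/* no binding cached for this address */

	/* 'entry' is now a private copy and can be used without the lock. */
	return 0;
}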
Example no. 6
int add_deserialization_func(void *ctxt, int type,
			void (*dfunc)(struct encode_context *,
				      struct decode_context *))
{
	struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
	struct dfunc_info *df_info;
	unsigned long flags;

	if (!ilctxt || !dfunc)
		return -EINVAL;

	df_info = kmalloc(sizeof(struct dfunc_info), GFP_KERNEL);
	if (!df_info)
		return -ENOSPC;

	read_lock_irqsave(&ipc_log_context_list_lock, flags);
	spin_lock(&ilctxt->ipc_log_context_lock);
	df_info->type = type;
	df_info->dfunc = dfunc;
	list_add_tail(&df_info->list, &ilctxt->dfunc_info_list);
	spin_unlock(&ilctxt->ipc_log_context_lock);
	read_unlock_irqrestore(&ipc_log_context_list_lock, flags);
	return 0;
}
Example no. 7
0
/**
 * rdma_get_gid_attr - Returns GID attributes for a port of a device
 * at a requested gid_index, if a valid GID entry exists.
 * @device:		The device to query.
 * @port_num:		The port number on the device where the GID value
 *			is to be queried.
 * @index:		Index of the GID table entry whose attributes are to
 *                      be queried.
 *
 * rdma_get_gid_attr() acquires reference count of gid attributes from the
 * cached GID table. Caller must invoke rdma_put_gid_attr() to release
 * reference to gid attribute regardless of link layer.
 *
 * Returns pointer to valid gid attribute or ERR_PTR for the appropriate error
 * code.
 */
const struct ib_gid_attr *
rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index)
{
	const struct ib_gid_attr *attr = ERR_PTR(-EINVAL);
	struct ib_gid_table *table;
	unsigned long flags;

	if (!rdma_is_port_valid(device, port_num))
		return ERR_PTR(-EINVAL);

	table = rdma_gid_table(device, port_num);
	if (index < 0 || index >= table->sz)
		return ERR_PTR(-EINVAL);

	read_lock_irqsave(&table->rwlock, flags);
	if (!is_gid_entry_valid(table->data_vec[index]))
		goto done;

	get_gid_entry(table->data_vec[index]);
	attr = &table->data_vec[index]->attr;
done:
	read_unlock_irqrestore(&table->rwlock, flags);
	return attr;
}
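Since the kernel-doc above states that rdma_get_gid_attr() takes a reference which the caller must drop with rdma_put_gid_attr(), a hedged sketch of that get/put pairing could look like this; the device pointer, port 1 and index 0 are placeholders.

/* Illustrative only: look up a GID entry, use it, and release the reference. */
static int inspect_gid_entry(struct ib_device *device)
{
	const struct ib_gid_attr *attr;

	attr = rdma_get_gid_attr(device, 1, 0);
	if (IS_ERR(attr))
		return PTR_ERR(attr);

	/* ... use attr->gid etc. while the reference is held ... */

	rdma_put_gid_attr(attr);	/* drop the reference taken above */
	return 0;
}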
Example no. 8
0
inline void timer_read_unlock(void) {
	read_unlock_irqrestore(&timer_lock, flags);
}
Example no. 9
0
static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
{
	int retval;
	struct zfcp_adapter *adapter = erp_action->adapter;
	unsigned long flags;

	read_lock_irqsave(&zfcp_data.config_lock, flags);
	write_lock(&adapter->erp_lock);

	zfcp_erp_strategy_check_fsfreq(erp_action);

	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
		zfcp_erp_action_dequeue(erp_action);
		retval = ZFCP_ERP_DISMISSED;
		goto unlock;
	}

	if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
		retval = ZFCP_ERP_FAILED;
		goto check_target;
	}

	zfcp_erp_action_to_running(erp_action);

	/* no lock to allow for blocking operations */
	write_unlock(&adapter->erp_lock);
	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
	retval = zfcp_erp_strategy_do_action(erp_action);
	read_lock_irqsave(&zfcp_data.config_lock, flags);
	write_lock(&adapter->erp_lock);

	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
		retval = ZFCP_ERP_CONTINUES;

	switch (retval) {
	case ZFCP_ERP_NOMEM:
		if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
			++adapter->erp_low_mem_count;
			erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
		}
		if (adapter->erp_total_count == adapter->erp_low_mem_count)
			_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1", NULL);
		else {
			zfcp_erp_strategy_memwait(erp_action);
			retval = ZFCP_ERP_CONTINUES;
		}
		goto unlock;

	case ZFCP_ERP_CONTINUES:
		if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
			--adapter->erp_low_mem_count;
			erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
		}
		goto unlock;
	}

check_target:
	retval = zfcp_erp_strategy_check_target(erp_action, retval);
	zfcp_erp_action_dequeue(erp_action);
	retval = zfcp_erp_strategy_statechange(erp_action, retval);
	if (retval == ZFCP_ERP_EXIT)
		goto unlock;
	if (retval == ZFCP_ERP_SUCCEEDED)
		zfcp_erp_strategy_followup_success(erp_action);
	if (retval == ZFCP_ERP_FAILED)
		zfcp_erp_strategy_followup_failed(erp_action);

 unlock:
	write_unlock(&adapter->erp_lock);
	read_unlock_irqrestore(&zfcp_data.config_lock, flags);

	if (retval != ZFCP_ERP_CONTINUES)
		zfcp_erp_action_cleanup(erp_action, retval);

	return retval;
}
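The strategy routine above deliberately drops both locks around zfcp_erp_strategy_do_action() ("no lock to allow for blocking operations") and then revalidates state after retaking them. A stripped-down sketch of that drop-and-reacquire pattern, with hypothetical lock, type and helper names, might be:

/* cfg_lock, struct my_object, do_blocking_work() and the 'dismissed' flag
 * are hypothetical; only the locking shape mirrors the code above. */
static DEFINE_RWLOCK(cfg_lock);

struct my_object {
	rwlock_t lock;
	int dismissed;
};

static int do_blocking_work(struct my_object *obj)
{
	/* stand-in for work that may sleep (I/O, memory allocation, ...) */
	return 0;
}

static int run_action(struct my_object *obj)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&cfg_lock, flags);
	write_lock(&obj->lock);
	/* ... examine and update state under both locks ... */
	write_unlock(&obj->lock);
	read_unlock_irqrestore(&cfg_lock, flags);

	ret = do_blocking_work(obj);		/* may sleep: no locks held */

	read_lock_irqsave(&cfg_lock, flags);
	write_lock(&obj->lock);
	if (obj->dismissed)			/* state may have changed meanwhile */
		ret = -EAGAIN;
	write_unlock(&obj->lock);
	read_unlock_irqrestore(&cfg_lock, flags);

	return ret;
}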
Example no. 10
0
static void ib_free_path_iter(struct ib_sa_attr_iter *iter)
{
	read_unlock_irqrestore(&rwlock, iter->flags);
}
Example no. 11
0
/*
 * EvUnSubscribeEvent() removes the user id from the list of subscribers
 * waiting for the particular event to occur.
 *
 * If the user ID does not indicate a currently registered user then
 * -ENOENT is returned.  If the event group is not a currently registered
 * group then -EEXIST is returned.  If the class is not found in the list
 * of currently known classes then -EINVAL is returned.  If the user is
 * not currently subscribed to receive the event then -EBUSY is returned.
 *
 * Otherwise the subscription is removed and a zero is returned to
 * indicate success.
 */
int
EvUnSubscribeEvent(EvUserID_t userID, EvGroupID_t groupID,
		   EvClassID_t classID, EvEventID_t eventID)
{
	EvDest_t *EventDestP = NULL;
	EvDest_t *FreeDest = NULL;
	EvDestBase_t *EventBase;
	EvDestBase_t *LastEventBase = NULL;
	EvKernelInfo_t *EventUser;
	EvGroupInfo_t *EGroup;
	EvClassInfo_t *HashBase;
	EvClassInfo_t *ClassBase;
	unsigned long Flags;

	/* Check the user id. */
	read_lock_irqsave(&EvUsersLock, Flags);
	if ((EventUser = EvCheckUser(userID)) == NULL) {
		read_unlock_irqrestore(&EvUsersLock, Flags);
		return -EV_ERROR_USER_EXISTS;
	}

	read_lock(&EvGroupLock);
	read_unlock(&EvUsersLock);

	/* Get the base event group information. */
	if ((EGroup = EvGetGroupBase(groupID)) == NULL) {
		read_unlock_irqrestore(&EvGroupLock, Flags);
		return -EV_ERROR_GROUP_EXIST;
	}

	HashBase = EvGetHashBase(EGroup, classID);

	/* Find the top level entry for this class of events. */
	if ((EventBase = EvFindEventBase(classID,HashBase,&ClassBase)) == NULL){
		read_unlock_irqrestore(&EvGroupLock, Flags);
		return -EV_ERROR_CLASS_EXISTS;
	}

	spin_lock(&ClassBase->EciLock);
	read_unlock(&EvGroupLock);

	/* search until the event is found in this category or until the
	 * last blank element on the list is found.
	 */
	while (EventBase->EdbNext != NULL) {
		if (EventBase->EdbEvent == eventID) {
			EventDestP = &EventBase->EdbDestQ;
			break;
		}

		LastEventBase = EventBase;
		EventBase = EventBase->EdbNext;
	}

	/* If the event type is not found then the user process was obviously not
	 * registered for this event.
	 */
	if (EventDestP == NULL) {
		spin_unlock_irqrestore(&ClassBase->EciLock, Flags);
		return -EV_ERROR_CLASS_NO_SUB;
	}

	if (EventDestP->EdID == userID) {
		if (EventDestP->EdNext == NULL) {
			/* This is the only element on the list so it
			 * will be removed below. */
			goto EvUnRegFreeBase;
		}

		EventDestP->EdUinfo = EventDestP->EdNext->EdUinfo;
		EventDestP->EdCB = EventDestP->EdNext->EdCB;
		EventDestP->EdID = EventDestP->EdNext->EdID;
		EventDestP->EdKinfo = EventDestP->EdNext->EdKinfo;
		FreeDest = EventDestP->EdNext;
		EventDestP->EdNext = EventDestP->EdNext->EdNext;

		goto EvUnRegFreeBase;
	}
		
	/* Always search one ahead to help with singly linked list removal. */
	while (EventDestP->EdNext->EdNext != NULL) {
		if (EventDestP->EdNext->EdID == userID) {
			FreeDest = EventDestP->EdNext;
			EventDestP->EdNext = EventDestP->EdNext->EdNext;

			goto EvUnRegFreeBase;
		}
		EventDestP = EventDestP->EdNext;
	}

	/* Entry not found in list. */
	spin_unlock_irqrestore(&ClassBase->EciLock, Flags);
	return -EV_ERROR_CLASS_NO_SUB;

EvUnRegFreeBase:
	EventBase->EdbUseCount--;

	if (EventBase->EdbUseCount == 0) {
		/* Nobody is registered to receive this event. */
		if (LastEventBase == NULL) {
			/* Free the top element */
			EventBase->EdbEvent = EventBase->EdbNext->EdbEvent;
			EventBase->EdbUseCount = EventBase->EdbNext->EdbUseCount;
			EventBase->EdbDestQ = EventBase->EdbNext->EdbDestQ;
			LastEventBase = EventBase->EdbNext;
			EventBase->EdbNext = LastEventBase->EdbNext;
			kfree(LastEventBase);
		} else {
			LastEventBase->EdbNext = EventBase->EdbNext;
			kfree(EventBase);
		}
	}

	ClassBase->EciUseCount--;
	kfree(FreeDest);

	spin_unlock_irqrestore(&ClassBase->EciLock, Flags);
	return EV_NOERR;
}
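A caller of EvUnSubscribeEvent() would typically just check for the EV_* codes described in the comment above; a minimal hedged sketch, assuming only the signature shown in this example:

/* Hypothetical teardown: drop a previously registered subscription and
 * log the error code if the removal fails. */
static void drop_subscription(EvUserID_t userID, EvGroupID_t groupID,
			      EvClassID_t classID, EvEventID_t eventID)
{
	int err;

	err = EvUnSubscribeEvent(userID, groupID, classID, eventID);
	if (err != EV_NOERR)
		printk(KERN_WARNING "EvUnSubscribeEvent failed: %d\n", err);
}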
Example no. 12
0
/*
 * EvSubscribeEvent() subscribes a kernel component to receive
 * an event.  If the event callback function kernelCB is specified,
 * events will be delivered by calling this function.  Otherwise
 * events will be placed on an internal event queue and the queue must
 * be polled using the EvGetEvent function.
 *
 * This function subscribes to an event locally.  It does not attempt to
 * inform a remote master that a subscriber has been attached.  This
 * should be added in a later version of the function.
 */
int
EvSubscribeEvent(EvUserID_t userID, EvPri_t pri,
		 EvGroupID_t groupID, EvClassID_t classID,
		 EvEventID_t eventID, EvAccess_t accessCode,
		 int (*kernelCB)(EvUserID_t, EvPri_t, EvGroupID_t,
				 EvGroupID_t, EvClassID_t, EvEventID_t,
				 int, int, int, int, int, void *))
{
	EvDest_t *EventDestP = NULL;
	EvDest_t *TmpDestP = NULL;
	EvDest_t *LastEventDestP = NULL;
	EvDestBase_t *EventBase;
	EvKernelInfo_t *EventUser;
	EvGroupInfo_t *EGroup;
	EvClassInfo_t *HashBase;
	EvClassInfo_t *ClassBase;
	unsigned long Flags;

	/* Check the user ID for validity. */
	read_lock_irqsave(&EvUsersLock, Flags);
	if ((EventUser = EvCheckUser(userID)) == NULL) {
		read_unlock_irqrestore(&EvUsersLock, Flags);
		return -EV_ERROR_USER_EXISTS;
	}

	/* Assume the Event user returned status will not change during
	 * the life of this request.
	 */
	read_lock(&EvGroupLock);
	read_unlock(&EvUsersLock);

	/* Get the event group pointer. */
	if ((EGroup = EvGetGroupBase(groupID)) == NULL) {
		read_unlock_irqrestore(&EvGroupLock, Flags);
		return -EV_ERROR_GROUP_EXIST;
	}

	HashBase = EvGetHashBase(EGroup, classID);

	/* Find the top level entry for this class of events. */
	if ((EventBase = EvFindEventBase(classID,HashBase,&ClassBase)) == NULL){
		read_unlock_irqrestore(&EvGroupLock, Flags);
		return -EV_ERROR_CLASS_EXISTS;
	}

	spin_lock(&ClassBase->EciLock);
	read_unlock(&EvGroupLock);

	/* Check permissions. */
	if (EvCheckAccessCode(ClassBase->EciAccessCode, accessCode)) {
		spin_unlock_irqrestore(&ClassBase->EciLock, Flags);
		return -EV_ERROR_CLASS_ACCESS;
	}

	/* search until the event is found in this category or until the
	 * last blank element on the list is found.
	 */
	while (EventBase->EdbNext != NULL) {
		if (EventBase->EdbEvent == eventID) {
			EventDestP = &EventBase->EdbDestQ;
			break;
		}
		EventBase = EventBase->EdbNext;
	}

	/* If no destination pointer has been identified for a chain
	 * search then this event type has not yet been registered by anybody
	 * so fill in the last empty list element and create a new empty one
	 * to indicate end of list.
	 */
	if (EventDestP == NULL) {
		EventBase->EdbEvent = eventID;
		EventBase->EdbUseCount = 0;

		/* Create the next empty element to indicate end of list. */
		if ((EventBase->EdbNext = kmalloc(sizeof(EvDestBase_t),
							GFP_ATOMIC)) == NULL) {
			spin_unlock_irqrestore(&ClassBase->EciLock, Flags);
			return -EV_ERROR_MEM_ALLOC;
		}

		EventBase->EdbNext->EdbNext = NULL;

		EventDestP = &EventBase->EdbDestQ;
		EventDestP->EdNext = kmalloc(sizeof(EvDest_t), GFP_ATOMIC);
		EventDestP->EdNext->EdNext = NULL;

		 goto EvFillInKernelRequestPacket;
	}

	/* Now search to see if this file descriptor already has registered
	 * for this event type.
	 */
	while (EventDestP->EdNext != NULL) {
		if (EventDestP->EdID == userID) {
			spin_unlock_irqrestore(&ClassBase->EciLock, Flags);
			return -EV_ERROR_CLASS_BUSY;
				
		}

		LastEventDestP = EventDestP;
		EventDestP = EventDestP->EdNext;
	}

	/* Now record the destination and create a new empty element to 
	 * indicate end of list.
	 */

	/* Most registrations go at the end of the list. */
	if ((LastEventDestP != NULL) && (LastEventDestP->EdPri >= pri)) {
		if ((EventDestP->EdNext = kmalloc(sizeof(EvDest_t),
							GFP_ATOMIC)) == NULL) {
			spin_unlock_irqrestore(&ClassBase->EciLock, Flags);
			return -EV_ERROR_MEM_ALLOC;
		}

		EventDestP->EdNext->EdNext = NULL;
		goto EvFillInKernelRequestPacket;
	}

	EventDestP = &EventBase->EdbDestQ;

	/* Check the priority against the top element */
	if (EventDestP->EdPri >= pri) {
		/* Priority of event places it somewhere in the middle */
		while (EventDestP->EdNext->EdPri >= pri) {
			EventDestP = EventDestP->EdNext;
		}
	}

	if ((TmpDestP = kmalloc(sizeof(EvDest_t), GFP_ATOMIC)) == NULL) {
		spin_unlock_irqrestore(&ClassBase->EciLock, Flags);
		return -EV_ERROR_MEM_ALLOC;
	}

	TmpDestP->EdPri = EventDestP->EdPri;
	TmpDestP->EdUinfo = EventDestP->EdUinfo;
	TmpDestP->EdCB = EventDestP->EdCB;
	TmpDestP->EdID = EventDestP->EdID;
	TmpDestP->EdKinfo = EventDestP->EdKinfo;
	TmpDestP->EdNext = EventDestP->EdNext;
	EventDestP->EdNext = TmpDestP;

EvFillInKernelRequestPacket:
	EventBase->EdbUseCount++;
	EventDestP->EdPri = pri;
	EventDestP->EdUinfo = NULL;
	EventDestP->EdID = userID;
	EventDestP->EdCB = kernelCB;

	EventDestP->EdKinfo = EventUser;

	ClassBase->EciUseCount++;

	spin_unlock_irqrestore(&ClassBase->EciLock, Flags);
	return EV_NOERR;
}
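To illustrate the callback-based delivery described in the comment above, here is a hedged subscriber sketch; the callback body, the zero priority and the wrapper function are placeholders, and only the prototype used by EvSubscribeEvent() in this example is assumed.

/* Hypothetical kernel callback matching the prototype EvSubscribeEvent()
 * takes above; it only logs the class/event identifiers it is handed. */
static int my_event_cb(EvUserID_t userID, EvPri_t pri, EvGroupID_t groupID,
		       EvGroupID_t memberID, EvClassID_t classID,
		       EvEventID_t eventID, int info0, int info1,
		       int info2, int info3, int dataLen, void *data)
{
	printk(KERN_INFO "event %d/%d received\n", (int)classID, (int)eventID);
	return 0;
}

/* Hypothetical registration: subscribe userID for one event with callback
 * delivery rather than queue polling. */
static int register_my_event(EvUserID_t userID, EvGroupID_t groupID,
			     EvClassID_t classID, EvEventID_t eventID,
			     EvAccess_t accessCode)
{
	return EvSubscribeEvent(userID, 0 /* priority */, groupID, classID,
				eventID, accessCode, my_event_cb);
}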
Example no. 13
0
static void rmnet_mhi_tx_cb(struct mhi_result *result)
{
	struct net_device *dev;
	struct rmnet_mhi_private *rmnet_mhi_ptr;
	unsigned long burst_counter = 0;
	unsigned long flags;

	rmnet_mhi_ptr = result->user_data;
	dev = rmnet_mhi_ptr->dev;
	tx_interrupts_count[rmnet_mhi_ptr->dev_index]++;

	rmnet_log(MSG_VERBOSE, "Entered\n");
	if (!result->payload_buf || !result->bytes_xferd)
		return;
	/* Free the buffers which are TX'd up to the provided address */
	while (!skb_queue_empty(&(rmnet_mhi_ptr->tx_buffers))) {
		struct sk_buff *skb =
			skb_dequeue(&(rmnet_mhi_ptr->tx_buffers));
		if (!skb) {
			rmnet_log(MSG_CRITICAL,
				  "NULL buffer returned, error");
			break;
		} else {
			struct tx_buffer_priv *tx_priv =
				(struct tx_buffer_priv *)(skb->cb);
			dma_addr_t dma_addr = tx_priv->dma_addr;
			int data_len = skb->len;

			dma_unmap_single(&(dev->dev),
					dma_addr,
					 skb->len,
					 DMA_TO_DEVICE);
			kfree_skb(skb);
			burst_counter++;

			/* Update statistics */
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += data_len;

			/* The payload is expected to be the phy addr.
			   Comparing to see if it's the last skb to
			   replenish
			*/
			if (dma_addr ==
				result->payload_buf)
				break;
		}
	} /* While TX queue is not empty */
	tx_cb_skb_free_burst_min[rmnet_mhi_ptr->dev_index] =
		min(burst_counter,
		    tx_cb_skb_free_burst_min[rmnet_mhi_ptr->dev_index]);

	tx_cb_skb_free_burst_max[rmnet_mhi_ptr->dev_index] =
		max(burst_counter,
		    tx_cb_skb_free_burst_max[rmnet_mhi_ptr->dev_index]);

	/* In case we couldn't write again, now we can! */
	read_lock_irqsave(&rmnet_mhi_ptr->out_chan_full_lock, flags);
	netif_wake_queue(dev);
	read_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock, flags);
	rmnet_log(MSG_VERBOSE, "Exited\n");
}
Example no. 14
0
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.caller = current,
		.mod = mod,
		.hcpu = hcpu,
	};
	unsigned long timeout;
	unsigned long flags;
	struct task_struct *g, *p;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	set_cpu_active(cpu, false);
	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		set_cpu_active(cpu, true);

		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		set_cpu_active(cpu, true);
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);

		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	timeout = jiffies + HZ;
	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu)) {
		msleep(1);
		if (time_after(jiffies, timeout)) {
			printk("%s: CPU%d not idle after offline. Running tasks:\n", __func__, cpu);
			read_lock_irqsave(&tasklist_lock, flags);
			do_each_thread(g, p) {
				if (!p->se.on_rq || task_cpu(p) != cpu)
					continue;
				sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock_irqrestore(&tasklist_lock, flags);
			timeout = jiffies + HZ;
		}
	}

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
Example no. 15
0
OS_VOID OS_RWLockReadRelease( OS_RWLOCK* aLock, OS_RWLOCK_FLAG* aFlag )
{
    rwlock_t* lRWLock = ( rwlock_t * ) aLock->Space;

    read_unlock_irqrestore( lRWLock, ( unsigned long ) *aFlag );
}
Example no. 16
0
int
EvRemoteSendEvent(EvUserID_t senderID, EvGroupID_t memberID, EvPri_t pri,
                  EvGroupID_t groupID, EvClassID_t classID, EvEventID_t eventID,
                  EvAccess_t accessCode,
                  int info0, int info1, int info2, int info3,
                  int dataLen, void *data)
{
    EvGroupInfo_t *EGroup;
    EvDestBase_t *EventBase;
    EvClassInfo_t *HashBase;
    EvClassInfo_t *ClassBase;
    unsigned long Flags;
    int RetVal;

    read_lock_irqsave(&EvGroupLock, Flags);
    if ((EGroup = EvGetGroupBase(groupID)) == NULL) {
        read_unlock_irqrestore(&EvGroupLock, Flags);
        return -EV_ERROR_GROUP_EXIST;
    }

    /* If this is a remote gen to the master then echo it back to
     * all members.
     */
    if (EGroup->EgiType == EG_MASTER) {
        if (EGroup->EgiGroupDest.EdID != 0) {
            RetVal = EvGroupSendEvent(senderID, pri,
                                      EGroup->EgiID, EGroup->EgiMemberID,
                                      accessCode, classID, eventID,
                                      info0, info1, info2, info3,
                                      &EGroup->EgiGroupDest, dataLen, data);
            if (RetVal) {
                read_unlock_irqrestore(&EvGroupLock, Flags);
                return RetVal;
            }
        }
    }

    /* If the memberID issuing the event is the same as the member
     * id of this system then we have been echoed back an event
     * we sent to the master.  Ignore it.
     */
    if ((EGroup->EgiType == EG_MEMBER) &&
            (EGroup->EgiMemberID == memberID)) {
        read_unlock_irqrestore(&EvGroupLock, Flags);
        return EV_NOERR;
    }

    HashBase = EvGetHashBase(EGroup, classID);

    if ((EventBase = EvFindEventBase(classID, HashBase, &ClassBase))==NULL) {
        read_unlock_irqrestore(&EvGroupLock, Flags);
        return -EV_ERROR_CLASS_EXISTS;
    }

    spin_lock(&ClassBase->EciLock);
    read_unlock(&EvGroupLock);

    RetVal = _EvSendEvent(EventBase, senderID, pri, groupID, memberID, classID,
                          eventID, info0, info1, info2, info3, dataLen, data);

    spin_unlock_irqrestore(&ClassBase->EciLock, Flags);
    return RetVal;
}
Example no. 17
0
/**
 * omap_device_set_rate - Set a new rate at which the device is to operate
 * @req_dev : pointer to the device requesting the scaling.
 * @dev : pointer to the device that is to be scaled
 * @rate : the new rate for the device.
 *
 * This API gets the device opp table associated with this device and
 * tries putting the device to the requested rate and the voltage domain
 * associated with the device to the voltage corresponding to the
 * requested rate. Since multiple devices can be associated with a
 * voltage domain this API finds out the possible voltage the
 * voltage domain can enter and then decides on the final device
 * rate. Returns 0 on success, else the error value.
 */
int omap_device_set_rate(struct device *req_dev, struct device *dev,
			unsigned long rate)
{
	struct omap_opp *opp;
	unsigned long volt, freq, min_freq, max_freq, flags;
	struct voltagedomain *voltdm;
	struct platform_device *pdev;
	struct omap_device *od;
	int ret;

	pdev = container_of(dev, struct platform_device, dev);
	od = _find_by_pdev(pdev);

	/* if in low power DPLL cascading mode, bail out early */
	if (cpu_is_omap44xx()) {
		read_lock_irqsave(&dpll_cascading_lock, flags);

		if (in_dpll_cascading) {
			ret = -EINVAL;
			goto out;
		}
	}

	/*
	 * Figure out if the desired frequency lies between the
	 * maximum and minimum possible for the particular device
	 */
	min_freq = 0;
	if (IS_ERR(opp_find_freq_ceil(dev, &min_freq))) {
		dev_err(dev, "%s: Unable to find lowest opp\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	max_freq = ULONG_MAX;
	if (IS_ERR(opp_find_freq_floor(dev, &max_freq))) {
		dev_err(dev, "%s: Unable to find highest opp\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	if (rate < min_freq)
		freq = min_freq;
	else if (rate > max_freq)
		freq = max_freq;
	else
		freq = rate;

	/* Get the possible rate from the opp layer */
	opp = opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp)) {
		dev_dbg(dev, "%s: Unable to find OPP for freq%ld\n",
			__func__, rate);
		ret = -ENODEV;
		goto out;
	}
	if (unlikely(freq != rate))
		dev_dbg(dev, "%s: Available freq %ld != dpll freq %ld.\n",
			__func__, freq, rate);

	/* Get the voltage corresponding to the requested frequency */
	volt = opp_get_voltage(opp);

	/*
	 * Call into the voltage layer to get the final voltage possible
	 * for the voltage domain associated with the device.
	 */
	voltdm = od->hwmods[0]->voltdm;
	ret = omap_voltage_add_userreq(voltdm, req_dev, &volt);
	if (ret) {
		dev_err(dev, "%s: Unable to get the final volt for scaling\n",
			__func__);
		goto out;
	}

	/* Do the actual scaling */
	ret =  omap_voltage_scale(voltdm);
out:
	if (cpu_is_omap44xx())
		read_unlock_irqrestore(&dpll_cascading_lock, flags);

	return ret;
}
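A hedged example of invoking omap_device_set_rate() as documented above; the two device pointers and the 200 MHz target are placeholder inputs.

/* Illustrative only: request a new rate on 'dev', recording 'req_dev'
 * as the device asking for the scaling. */
static int request_device_rate(struct device *req_dev, struct device *dev)
{
	int ret;

	ret = omap_device_set_rate(req_dev, dev, 200000000UL /* 200 MHz */);
	if (ret)
		dev_warn(dev, "rate request failed: %d\n", ret);
	return ret;
}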
Example no. 18
0
/**
 * @brief Function for sending data on an outbound channel.
 * This function only sends one TRE's worth of
 * data and may chain the TRE as specified by the caller.
 *
 * @param device [IN ] Pointer to mhi context used to send the TRE
 * @param chan [IN ] Channel number to send the TRE on
 * @param buf [IN ] Physical address of buffer to be linked to descriptor
 * @param buf_len [IN ] Length of buffer, which will be populated in the TRE
 * @param chain [IN ] Specification on whether this TRE should be chained
 *
 * @return MHI_STATUS
 */
MHI_STATUS mhi_queue_xfer(mhi_client_handle *client_handle,
		uintptr_t buf, size_t buf_len, u32 chain, u32 eob)
{
	mhi_xfer_pkt *pkt_loc;
	MHI_STATUS ret_val;
	MHI_CLIENT_CHANNEL chan;
	mhi_device_ctxt *mhi_dev_ctxt;
	unsigned long flags;

	if (NULL == client_handle || !VALID_CHAN_NR(client_handle->chan) ||
		0 == buf || chain >= MHI_TRE_CHAIN_LIMIT || 0 == buf_len) {
		mhi_log(MHI_MSG_CRITICAL, "Bad input args\n");
		return MHI_STATUS_ERROR;
	}
	MHI_ASSERT(VALID_BUF(buf, buf_len),
			"Client buffer is of invalid length\n");
	mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
	chan = client_handle->chan;


	/* Bump up the vote for pending data */
	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);

	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
	mhi_dev_ctxt->counters.m1_m0++;
	if (mhi_dev_ctxt->flags.link_up)
		mhi_assert_device_wake(mhi_dev_ctxt);
	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

	pkt_loc = mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp;
	pkt_loc->data_tx_pkt.buffer_ptr = buf;

	if (likely(0 != client_handle->intmod_t))
		MHI_TRB_SET_INFO(TX_TRB_BEI, pkt_loc, 1);
	else
		MHI_TRB_SET_INFO(TX_TRB_BEI, pkt_loc, 0);

	MHI_TRB_SET_INFO(TX_TRB_IEOT, pkt_loc, 1);
	MHI_TRB_SET_INFO(TX_TRB_CHAIN, pkt_loc, chain);
	MHI_TRB_SET_INFO(TX_TRB_IEOB, pkt_loc, eob);
	MHI_TRB_SET_INFO(TX_TRB_TYPE, pkt_loc, MHI_PKT_TYPE_TRANSFER);
	MHI_TX_TRB_SET_LEN(TX_TRB_LEN, pkt_loc, buf_len);

	if (chan % 2 == 0) {
		atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
		mhi_log(MHI_MSG_VERBOSE,
			"Queued outbound pkt. Pending Acks %d\n",
		atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
	}

	/* Add the TRB to the correct transfer ring */
	ret_val = ctxt_add_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
				(void *)&pkt_loc);
	if (unlikely(MHI_STATUS_SUCCESS != ret_val)) {
		mhi_log(MHI_MSG_INFO, "Failed to insert trb in xfer ring\n");
		goto error;
	}
	mhi_notify_device(mhi_dev_ctxt, chan);
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	return MHI_STATUS_SUCCESS;
error:
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	return ret_val;
}
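Following the parameter description above, a hedged sketch of queuing a single unchained transfer with mhi_queue_xfer(); the client handle and the already DMA-mapped buffer are assumed to have been set up elsewhere, and the zero chain/eob flags are placeholders.

/* Illustrative only: queue one buffer with no chaining and no
 * end-of-block interrupt requested. */
static MHI_STATUS queue_one_buffer(mhi_client_handle *handle,
				   uintptr_t dma_addr, size_t len)
{
	MHI_STATUS ret;

	ret = mhi_queue_xfer(handle, dma_addr, len, 0 /* chain */, 0 /* eob */);
	if (MHI_STATUS_SUCCESS != ret)
		mhi_log(MHI_MSG_INFO, "Failed to queue transfer\n");
	return ret;
}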
Example no. 19
0
static int sched_debug_show_at_KE(struct seq_file *m, void *v)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;
	int cpu;
	int locked;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable);
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);

	//read_lock_irqsave(&tasklist_lock, flags);
	locked = read_trylock_n_irqsave(&tasklist_lock, &flags, m, "sched_debug_show_at_KE");
	//for_each_online_cpu(cpu)
	for_each_possible_cpu(cpu)
		print_cpu_at_KE(m, cpu);
	if (locked)
		read_unlock_irqrestore(&tasklist_lock, flags);
	SEQ_printf(m, "\n");

	return 0;
}