/* Example 1 */
void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	int prio;

	/* Start with every per-priority queue empty and its bitmap bit clear. */
	for (prio = 0; prio < MAX_RT_PRIO; prio++) {
		INIT_LIST_HEAD(&array->queue[prio]);
		__clear_bit(prio, array->bitmap);
	}
	/* Delimiter bit so bitmap searches terminate past the last priority. */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	/* No RT task queued yet, so the highest priority is the sentinel. */
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif

	/* Fresh bandwidth-accounting state. */
	rt_rq->rt_throttled = 0;
	rt_rq->rt_time = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}
/* Example 2 */
/*
 * Initialize the priority-inheritance bookkeeping embedded in @p.
 * Presumably run while the task is being set up, before it can block
 * on an rt_mutex — TODO confirm against the caller.
 */
static void rt_mutex_init_task(struct task_struct *p)
{
	spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	/* No waiters boosting this task yet, and it is not blocked on any lock. */
	plist_head_init(&p->pi_waiters, &p->pi_lock);
	p->pi_blocked_on = NULL;
#endif
}
/* Example 3 */
/**
 * __rt_mutex_init - set up an rt lock in the unlocked state
 *
 * @lock: the rt lock to initialize
 * @name: name handed to the debug initialization code
 *
 * The lock comes out of this routine unlocked; initializing a lock
 * that is currently held is not allowed.
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	raw_spin_lock_init(&lock->wait_lock);
	plist_head_init(&lock->wait_list);
	lock->owner = NULL;

	debug_rt_mutex_init(lock, name);
}
/* Example 4 */
/*
 * we initialize the wait_list runtime. (Could be done build-time and/or
 * boot-time.)
 */
static inline void init_lists(struct rt_mutex *lock)
{
	if (unlikely(!lock->wait_list.prio_list.prev)) {
		plist_head_init(&lock->wait_list, &lock->wait_lock);
#ifdef CONFIG_DEBUG_RT_MUTEXES
		pi_initialized++;
#endif
	}
}
/*
 * mm_core_init - allocate and initialize one multimedia core device.
 * @mm_common:   parent common state the core is attached to
 * @mm_dev_name: device name, used only in the failure message
 * @core_params: hardware interface description; mm_virt_addr is filled
 *               in here once the device registers are mapped
 *
 * Returns the new core handle on success, NULL on failure.  The caller
 * owns the returned object and releases it with mm_core_exit().
 */
void *mm_core_init(struct mm_common *mm_common,
		const char *mm_dev_name,
		MM_CORE_HW_IFC *core_params)
{
	struct mm_core *core_dev = NULL;

	if (validate(core_params))
		goto err_register2;

	/* kzalloc replaces the original kmalloc()+memset() pair. */
	core_dev = kzalloc(sizeof(*core_dev), GFP_KERNEL);
	if (core_dev == NULL) {
		pr_err("mm_core_init: kzalloc failed\n");
		goto err_register;
	}

	/* Init structure */
	INIT_WORK(&(core_dev->job_scheduler), mm_fmwk_job_scheduler);
	plist_head_init(&(core_dev->job_list));
	core_dev->device_job_id = 1;
	core_dev->mm_core_idle = true;
	core_dev->mm_core_is_on = false;
	core_dev->mm_common = mm_common;

	/* Map the dev registers */
	if (core_params->mm_hw_size) {
		core_dev->dev_base = (void __iomem *)ioremap_nocache(
					core_params->mm_base_addr,
					core_params->mm_hw_size);
		if (core_dev->dev_base == NULL) {
			pr_err("register mapping failed ");
			goto err_register;
		}
		/* core_params is known to device, device can make use of KVA */
		core_params->mm_virt_addr = (void *)core_dev->dev_base;
	}

	core_params->mm_update_virt_addr(core_params->mm_virt_addr);
	core_dev->mm_device = *core_params;

	if (core_params->mm_version_init != NULL) {
		/* Version query needs the core clock running. */
		mm_core_enable_clock(core_dev);
		core_params->mm_version_init(core_params->mm_device_id,
					     core_params->mm_virt_addr,
					     &mm_common->version_info);
		mm_core_disable_clock(core_dev);
	}

	return core_dev;

err_register:
	pr_err("Error in core_init for %s", mm_dev_name);
	if (core_dev)
		mm_core_exit(core_dev);	/* frees core_dev and its mapping */
err_register2:
	return NULL;
}
/* Example 6 — File: rt.c, project: BozkurTR/kernel */
// ARM10C 20140830
// [pcp0] &rq->rt: &(&runqueues)->rt, rq: (&runqueues)
// Annotated trace of init_rt_rq for the per-cpu runqueue case; the
// inline comments record the concrete values seen during that walk.
void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
	struct rt_prio_array *array;
	int i;

	// &rt_rq->active: (&(&runqueues)->rt)->active
	array = &rt_rq->active;
	// array: (&(&runqueues)->rt)->active

	// MAX_RT_PRIO: 100
	for (i = 0; i < MAX_RT_PRIO; i++) {
		// i: 0, array->queue: (&(&(&runqueues)->rt)->active)->queue
		INIT_LIST_HEAD(array->queue + i);
		// initializes the list at (&(&(&runqueues)->rt)->active)->queue[0]

		// i: 0, array->bitmap: (&(&(&runqueues)->rt)->active)->bitmap
		__clear_bit(i, array->bitmap);
		// clears bit 0 of (&(&(&runqueues)->rt)->active)->bitmap

		// repeated for i = 1 ... 99
	}

	/* delimiter for bitsearch: */
	// MAX_RT_PRIO: 100, array->bitmap: (&(&(&runqueues)->rt)->active)->bitmap
	__set_bit(MAX_RT_PRIO, array->bitmap);
	// sets bit 100 of (&(&(&runqueues)->rt)->active)->bitmap to 1

#if defined CONFIG_SMP // CONFIG_SMP=y
	// &rt_rq->highest_prio.curr: (&(&runqueues)->rt)->highest_prio.curr, MAX_RT_PRIO: 100
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	// &rt_rq->highest_prio.curr: (&(&runqueues)->rt)->highest_prio.curr: 100

	// &rt_rq->highest_prio.next: (&(&runqueues)->rt)->highest_prio.next, MAX_RT_PRIO: 100
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	// &rt_rq->highest_prio.next: (&(&runqueues)->rt)->highest_prio.next: 100

	// &rt_rq->rt_nr_migratory: (&(&runqueues)->rt)->rt_nr_migratory
	rt_rq->rt_nr_migratory = 0;
	// &rt_rq->rt_nr_migratory: (&(&runqueues)->rt)->rt_nr_migratory: 0

	// &rt_rq->overloaded: (&(&runqueues)->rt)->overloaded
	rt_rq->overloaded = 0;
	// &rt_rq->overloaded: (&(&runqueues)->rt)->overloaded: 0

	// &rt_rq->pushable_tasks: &(&(&runqueues)->rt)->pushable_tasks
	plist_head_init(&rt_rq->pushable_tasks);
	// initializes the (&(&(&runqueues)->rt)->pushable_tasks)->node_list list
#endif

	// &rt_rq->rt_time: (&(&runqueues)->rt)->rt_time
	rt_rq->rt_time = 0;
	// &rt_rq->rt_time: (&(&runqueues)->rt)->rt_time: 0

	// &rt_rq->rt_throttled: (&(&runqueues)->rt)->rt_throttled
	rt_rq->rt_throttled = 0;
	// &rt_rq->rt_throttled: (&(&runqueues)->rt)->rt_throttled: 0

	// &rt_rq->rt_runtime: (&(&runqueues)->rt)->rt_runtime
	rt_rq->rt_runtime = 0;
	// &rt_rq->rt_runtime: (&(&runqueues)->rt)->rt_runtime: 0

	// &rt_rq->rt_runtime_lock: (&(&runqueues)->rt)->rt_runtime_lock
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
	// initializes the spinlock (&(&runqueues)->rt)->rt_runtime_lock
}
/*
 * kona_memc_probe - platform probe for the Kona memory controller.
 *
 * Pulls configuration either from board platform_data or from the
 * device tree, maps the controller register windows, and initializes
 * the global kona_memc state.
 *
 * Fixes over the original: the chipreg_base and memc0_aphy_base
 * ioremap() results are now checked before use, and every DT-branch
 * error path unmaps whatever was already mapped instead of leaking it.
 *
 * Returns 0 on success or a negative errno.
 */
static int kona_memc_probe(struct platform_device *pdev)
{
	u32 val, *addr;
	int size, ret;
	struct resource *iomem;
	struct kona_memc_pdata *pdata;

	spin_lock_init(&kona_memc.memc_lock);
	plist_head_init(&kona_memc.min_pwr_list);
	kona_memc.active_min_pwr = 0;

	if (pdev->dev.platform_data) {
		pdata = pdev->dev.platform_data;
	} else if (pdev->dev.of_node) {
		pdata = kzalloc(sizeof(struct kona_memc_pdata), GFP_KERNEL);
		if (!pdata)
			return -ENOMEM;

		/* Get register memory resource */
		iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!iomem) {
			pr_info("no mem resource\n");
			ret = -ENODEV;
			goto err_free_pdata;
		}
		pdata->memc0_ns_base = (u32)ioremap(iomem->start,
				resource_size(iomem));
		if (!pdata->memc0_ns_base) {
			pr_info("unable to map in registers\n");
			ret = -ENOMEM;
			goto err_free_pdata;
		}

		/* DT encodes <base size> pairs as two big-endian cells. */
		addr = (u32 *)of_get_property(pdev->dev.of_node, "chipreg_base",
				&size);
		if (!addr) {
			ret = -EINVAL;
			goto err_unmap_ns;
		}
		val = *(addr + 1);
		pdata->chipreg_base = (u32)ioremap(be32_to_cpu(*addr),
				be32_to_cpu(val));
		if (!pdata->chipreg_base) {
			ret = -ENOMEM;
			goto err_unmap_ns;
		}

		addr = (u32 *)of_get_property(pdev->dev.of_node,
				"memc0_aphy_base", &size);
		if (!addr) {
			ret = -EINVAL;
			goto err_unmap_chipreg;
		}
		val = *(addr + 1);
		pdata->memc0_aphy_base = (u32)ioremap(be32_to_cpu(*addr),
				be32_to_cpu(val));
		if (!pdata->memc0_aphy_base) {
			ret = -ENOMEM;
			goto err_unmap_chipreg;
		}

		ret = of_property_read_u32(pdev->dev.of_node,
				"seq_busy_val", &val);
		if (ret != 0) {
			ret = -EINVAL;
			goto err_unmap_aphy;
		}
		pdata->seq_busy_val = val;

		if (of_property_read_u32(pdev->dev.of_node,
					"flags", &val)) {
			ret = -EINVAL;
			goto err_unmap_aphy;
		}
		pdata->flags = val;

		if (of_property_read_u32(pdev->dev.of_node,
					"max_pwr", &val)) {
			ret = -EINVAL;
			goto err_unmap_aphy;
		}
		pdata->max_pwr = val;
	} else {
		pr_info("%s: no platform data found\n", __func__);
		return -EINVAL;
	}

	kona_memc.pdata = pdata;
	kona_memc.memc0_ns_base = pdata->memc0_ns_base;
	kona_memc.chipreg_base = pdata->chipreg_base;
	kona_memc.memc0_aphy_base = pdata->memc0_aphy_base;
	memc_init(&kona_memc);
	pr_info("%s: ddr freq = %lu\n", __func__,
			compute_ddr_clk_freq(&kona_memc));
	return 0;

	/* DT-branch unwind: unmap in reverse order, then free pdata. */
err_unmap_aphy:
	iounmap((void __iomem *)pdata->memc0_aphy_base);
err_unmap_chipreg:
	iounmap((void __iomem *)pdata->chipreg_base);
err_unmap_ns:
	iounmap((void __iomem *)pdata->memc0_ns_base);
err_free_pdata:
	kfree(pdata);
	return ret;
}