void kbase_gpuprops_set(kbase_device *kbdev)
{
	kbase_gpu_props *gpu_props;
	struct midg_raw_gpu_props *raw;

	KBASE_DEBUG_ASSERT(NULL != kbdev);
	gpu_props = &kbdev->gpu_props;
	raw = &gpu_props->props.raw_props;

	/* Initialize the base_gpu_props structure from the hardware */
	kbase_gpuprops_get_props(&gpu_props->props, kbdev);

	/* Populate the derived properties */
	kbase_gpuprops_calculate_props(&gpu_props->props, kbdev);

	/* Populate kbase-only fields */
	gpu_props->l2_props.associativity = KBASE_UBFX32(raw->l2_features, 8U, 8);
	gpu_props->l2_props.external_bus_width = KBASE_UBFX32(raw->l2_features, 24U, 8);

	gpu_props->l3_props.associativity = KBASE_UBFX32(raw->l3_features, 8U, 8);
	gpu_props->l3_props.external_bus_width = KBASE_UBFX32(raw->l3_features, 24U, 8);

	gpu_props->mem.core_group = KBASE_UBFX32(raw->mem_features, 0U, 1);
	gpu_props->mem.supergroup = KBASE_UBFX32(raw->mem_features, 1U, 1);

	gpu_props->mmu.va_bits = KBASE_UBFX32(raw->mmu_features, 0U, 8);
	gpu_props->mmu.pa_bits = KBASE_UBFX32(raw->mmu_features, 8U, 8);

	gpu_props->num_cores = hweight64(raw->shader_present);
	gpu_props->num_core_groups = hweight64(raw->l2_present);
	gpu_props->num_supergroups = hweight64(raw->l3_present);
	gpu_props->num_address_spaces = hweight32(raw->as_present);
	gpu_props->num_job_slots = hweight32(raw->js_present);
}
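
For context, here is a minimal sketch of the bitfield-extract helper used above, assuming KBASE_UBFX32(value, offset, size) returns the size-bit field of value starting at bit offset. The macro itself is not shown in this example, so this is a hypothetical reconstruction, not necessarily the driver's actual definition:

/* Hypothetical reconstruction of the unsigned bitfield extract helper. */
#define KBASE_UBFX32(value, offset, size) \
	(((u32)(value) >> (u32)(offset)) & (u32)((1ULL << (size)) - 1))

/* e.g. with an invented raw->l2_features == 0x07100206:
 * KBASE_UBFX32(0x07100206, 8U, 8) == 0x02 (the associativity field). */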
Example #2
/*
 * Like ocfs2_hamming_encode(), this can handle hunks.  nr is the bit
 * offset of the current hunk.  If bit to be fixed is not part of the
 * current hunk, this does nothing.
 *
 * If you only have one hunk, use ocfs2_hamming_fix_block().
 */
void ocfs2_hamming_fix(void *data, unsigned int d, unsigned int nr,
		       unsigned int fix)
{
	unsigned int i, b;

	BUG_ON(!d);

	/*
	 * If the bit to fix has an hweight of 1, it's a parity bit.  One
	 * busted parity bit is its own error.  Nothing to do here.
	 */
	if (hweight32(fix) == 1)
		return;

	/*
	 * nr + d is the bit right past the data hunk we're looking at.
	 * If the fix is past that, there is nothing to do.
	 */
	if (fix >= calc_code_bit(nr + d, NULL))
		return;

	/*
	 * nr is the offset in the data hunk we're starting at.  Let's
	 * start b at the offset in the code buffer.  See ocfs2_hamming_encode()
	 * for a more detailed description of 'b'.
	 */
	b = calc_code_bit(nr, NULL);
	/* If the fix is before this hunk, nothing to do */
	if (fix < b)
		return;

	for (i = 0; i < d; i++, b++)
	{
		/* Skip past parity bits */
		while (hweight32(b) == 1)
			b++;

		/*
		 * i is the offset in this data hunk.
		 * nr + i is the offset in the total data buffer.
		 * b is the offset in the total code buffer.
		 *
		 * Thus, when b == fix, bit i in the current hunk needs
		 * fixing.
		 */
		if (b == fix)
		{
			if (ocfs2_test_bit(i, data))
				ocfs2_clear_bit(i, data);
			else
				ocfs2_set_bit(i, data);
			break;
		}
	}
}
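
The hweight32(b) == 1 tests above exploit a property of Hamming codes: code-buffer positions whose 1-based index is a power of two hold parity bits, and an index is a power of two exactly when it has a single set bit. A small standalone illustration (not from the ocfs2 source):

/* A power of two has exactly one set bit, so hweight32(pos) == 1
 * identifies the Hamming parity positions 1, 2, 4, 8, ... */
static int is_parity_position(unsigned int pos)
{
	return pos != 0 && (pos & (pos - 1)) == 0;
}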
Example #3
static ssize_t solo_get_tw28xx(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct solo6010_dev *solo_dev =
		container_of(dev, struct solo6010_dev, dev);

	return sprintf(buf, "tw2815[%d] tw2864[%d] tw2865[%d]\n",
		       hweight32(solo_dev->tw2815),
		       hweight32(solo_dev->tw2864),
		       hweight32(solo_dev->tw2865));
}
Example #4
unsigned int __get_pending_param_count(struct fimc_is *is)
{
	struct chain_config *config = &is->config[is->config_index];
	unsigned long flags;
	unsigned int count;

	spin_lock_irqsave(&is->slock, flags);
	count = hweight32(config->p_region_index[0]);
	count += hweight32(config->p_region_index[1]);
	spin_unlock_irqrestore(&is->slock, flags);

	return count;
}
Example #5
void hweight32_test()
{
	for (int i = 0; i < 100000; ++i) {
		uint32_t r = RAND_NR_NEXT(u, v, w);
		assert(__builtin_popcountl(r) == hweight32(r));
	}
}
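
As a cross-check for the test above, this is the classic parallel bit count that generic kernel builds commonly use for hweight32() (arch-specific builds may use a popcount instruction instead); it is shown here as a sketch, not as the exact kernel source:

#include <stdint.h>

static unsigned int hweight32_ref(uint32_t w)
{
	w = w - ((w >> 1) & 0x55555555);		/* 2-bit sums */
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);	/* 4-bit sums */
	w = (w + (w >> 4)) & 0x0f0f0f0f;		/* per-byte sums */
	return (w * 0x01010101) >> 24;			/* total in top byte */
}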
Example #6
/*
 * If we have a gap in the pending bits, that means we either
 * raced with the hardware or failed to readout all upper
 * objects in the last run due to quota limit.
 */
static u32 c_can_adjust_pending(u32 pend)
{
	u32 weight, lasts;

	if (pend == RECEIVE_OBJECT_BITS)
		return pend;

	/*
	 * If the last set bit is larger than the number of pending
	 * bits we have a gap.
	 */
	weight = hweight32(pend);
	lasts = fls(pend);

	/* If the bits are linear, nothing to do */
	if (lasts == weight)
		return pend;

	/*
	 * Find the first set bit after the gap. We walk backwards
	 * from the last set bit.
	 */
	for (lasts--; pend & (1 << (lasts - 1)); lasts--);

	return pend & ~((1 << lasts) - 1);
}
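
A worked illustration of the gap handling, with a pending mask invented for exposition: pend = 0b1101110 has hweight32(pend) = 5 but fls(pend) = 7, so a gap exists. The loop steps down from bit 6 while the next lower bit is set and stops at lasts = 5 (bit 4 is clear), so pend & ~((1 << 5) - 1) = 0b1100000 keeps only the objects above the gap.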
Example #7
/**
 * bu21013_do_touch_report(): Get the touch co-ordinates
 * @data: bu21013_ts_data structure pointer
 *
 * Gets the touch co-ordinates from the touch sensor registers, writes
 * them into the device structure, and returns an integer.
 */
static int bu21013_do_touch_report(struct bu21013_ts_data *data)
{
	u8	buf[LENGTH_OF_BUFFER];
	unsigned int pos_x[2], pos_y[2];
	bool	has_x_sensors, has_y_sensors;
	int	finger_down_count = 0;
	int	i;

	if (data == NULL)
		return -EINVAL;

	if (bu21013_read_block_data(data, buf) < 0)
		return -EINVAL;

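	/* a non-zero bit count just means "at least one sensor enabled" */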
	has_x_sensors = hweight32(buf[0] & BU21013_SENSORS_EN_0_7);
	has_y_sensors = hweight32(((buf[1] & BU21013_SENSORS_EN_8_15) |
		((buf[2] & BU21013_SENSORS_EN_16_23) << SHIFT_8)) >> SHIFT_2);
	if (!has_x_sensors || !has_y_sensors)
		return 0;

	for (i = 0; i < 2; i++) {
		const u8 *p = &buf[4 * i + 3];
		unsigned int x = p[0] << SHIFT_2 | (p[1] & MASK_BITS);
		unsigned int y = p[2] << SHIFT_2 | (p[3] & MASK_BITS);
		if (x == 0 || y == 0)
			continue;
		x = x * data->factor_x / SCALE_FACTOR;
		y = y * data->factor_y / SCALE_FACTOR;
		pos_x[finger_down_count] = x;
		pos_y[finger_down_count] = y;
		finger_down_count++;
	}

	if (finger_down_count) {
		if (finger_down_count == 2 &&
				(abs(pos_x[0] - pos_x[1]) < DELTA_MIN ||
				abs(pos_y[0] - pos_y[1]) < DELTA_MIN))
			return 0;

		for (i = 0; i < finger_down_count; i++) {
			if (data->chip->portrait && data->chip->x_flip)
				pos_x[i] = data->chip->x_max_res - pos_x[i];
			if (data->chip->portrait && data->chip->y_flip)
				pos_y[i] = data->chip->y_max_res - pos_y[i];
			input_report_abs(data->in_dev, ABS_MT_TOUCH_MAJOR,
						max(pos_x[i], pos_y[i]));
			input_report_abs(data->in_dev, ABS_MT_POSITION_X,
								pos_x[i]);
			input_report_abs(data->in_dev, ABS_MT_POSITION_Y,
								pos_y[i]);
			input_mt_sync(data->in_dev);
		}
	} else
		input_mt_sync(data->in_dev);

	input_sync(data->in_dev);

	return 0;
}
Example #8
unsigned int hweight32_time()
{
	unsigned int r = 0;

	for (uint32_t i = 0; i < 100000000; ++i)
		r += hweight32(i);
	return r;
}
Example #9
/*
 * See following comments. This insn must be 'push'.
 */
static enum probes_insn __kprobes t16_check_stack(probes_opcode_t insn,
		struct arch_probes_insn *asi,
		const struct decode_header *h)
{
	unsigned int reglist = insn & 0x1ff;
	asi->stack_space = hweight32(reglist) * 4;
	return INSN_GOOD;
}
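
A worked illustration, assuming the usual Thumb-16 'push' encoding in which bits 0-7 select r0-r7 and bit 8 selects LR: push {r4-r7, lr} encodes reglist = 0x1f0, hweight32(0x1f0) = 5 pushed registers, so asi->stack_space = 5 * 4 = 20 bytes.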
Example #10
static int count_bits32(uint32_t *addr, unsigned size)
{
    int count = 0, i;

    for (i = 0; i < size; i++) {
        count += hweight32(*(addr+i));
    }
    return count;
}
Example #11
static uint64_t count_bits32(dm_bitset_t bs)
{
	unsigned i, size = bs[0]/(unsigned)DM_BITS_PER_INT + 1;
	unsigned count = 0;

	for (i = 1; i <= size; i++)
		count += hweight32(bs[i]);

	return (uint64_t)count;
}
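
Here bs is a dm_bitset_t: an array of unsigned ints whose element 0 stores the number of bits, with the bit words following from bs[1], which is why the loop runs from 1 to size inclusive. A minimal allocator sketch under that layout assumption (illustrative only, not the libdevmapper implementation):

#include <stdlib.h>

static unsigned *bitset_alloc(unsigned num_bits, unsigned bits_per_int)
{
	unsigned words = num_bits / bits_per_int + 1;	/* bit words */
	unsigned *bs = calloc(words + 1, sizeof(*bs));	/* +1 for bs[0] */

	if (bs)
		bs[0] = num_bits;	/* bit count lives in element 0 */
	return bs;
}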
Example #12
static int gk20a_determine_L2_size_bytes(struct gk20a *g)
{
	const u32 gpuid = GK20A_GPUID(g->gpu_characteristics.arch,
				      g->gpu_characteristics.impl);
	u32 lts_per_ltc;
	u32 ways;
	u32 sets;
	u32 bytes_per_line;
	u32 active_ltcs;
	u32 cache_size;

	u32 tmp;
	u32 active_sets_value;

	tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_cfg1_r());
	ways = hweight32(ltc_ltc0_lts0_tstg_cfg1_active_ways_v(tmp));

	active_sets_value = ltc_ltc0_lts0_tstg_cfg1_active_sets_v(tmp);
	if (active_sets_value == ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v()) {
		sets = 64;
	} else if (active_sets_value ==
		 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v()) {
		sets = 32;
	} else if (active_sets_value ==
		 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) {
		sets = 16;
	} else {
		dev_err(dev_from_gk20a(g),
			"Unknown constant %u for active sets",
		       (unsigned)active_sets_value);
		sets = 0;
	}

	active_ltcs = g->gr.num_fbps;

	/* chip-specific values */
	switch (gpuid) {
	case GK20A_GPUID_GK20A:
		lts_per_ltc = 1;
		bytes_per_line = 128;
		break;

	default:
		dev_err(dev_from_gk20a(g), "Unknown GPU id 0x%02x\n",
			(unsigned)gpuid);
		lts_per_ltc = 0;
		bytes_per_line = 0;
	}

	cache_size = active_ltcs * lts_per_ltc * ways * sets * bytes_per_line;

	return cache_size;
}
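
Plugging in the GK20A constants as a worked example (the register field values are invented for exposition): with one active LTC, lts_per_ltc = 1, bytes_per_line = 128, ways = hweight32(active_ways) = 16 and sets = 64, the result is cache_size = 1 * 1 * 16 * 64 * 128 = 131072 bytes, i.e. a 128 KiB L2.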
Example #13
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q)
{
	struct cik_mqd *m;
	struct kfd_cu_info cu_info;
	uint32_t mgmt_se_mask;
	uint32_t cu_sh_mask, cu_sh_shift;
	uint32_t cu_mask;
	int se, sh;

	if (q->cu_mask == 0)
		return;

	m = get_mqd(mqd);
	m->compute_static_thread_mgmt_se0 = 0;
	m->compute_static_thread_mgmt_se1 = 0;
	m->compute_static_thread_mgmt_se2 = 0;
	m->compute_static_thread_mgmt_se3 = 0;

	mm->dev->kfd2kgd->get_cu_info(mm->dev->kgd, &cu_info);
	cu_mask = q->cu_mask;
	for (se = 0; se < cu_info.num_shader_engines && cu_mask; se++) {
		mgmt_se_mask = 0;
		for (sh = 0; sh < 2 && cu_mask; sh++) {
			cu_sh_shift = hweight32(cu_info.cu_bitmap[se][sh]);
			cu_sh_mask = (1 << cu_sh_shift) - 1;
			mgmt_se_mask |= (cu_mask & cu_sh_mask) << (sh * 16);
			cu_mask >>= cu_sh_shift;
		}
		switch (se) {
		case 0:
			m->compute_static_thread_mgmt_se0 = mgmt_se_mask;
			break;
		case 1:
			m->compute_static_thread_mgmt_se1 = mgmt_se_mask;
			break;
		case 2:
			m->compute_static_thread_mgmt_se2 = mgmt_se_mask;
			break;
		case 3:
			m->compute_static_thread_mgmt_se3 = mgmt_se_mask;
			break;
		default:
			break;
		}
	}
	pr_debug("kfd: update cu mask to %#x %#x %#x %#x\n",
		m->compute_static_thread_mgmt_se0,
		m->compute_static_thread_mgmt_se1,
		m->compute_static_thread_mgmt_se2,
		m->compute_static_thread_mgmt_se3);
}
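
A worked illustration of the distribution loop (CU counts invented for exposition): if each shader array of shader engine 0 reports 8 CUs, then for sh = 0, cu_sh_shift = hweight32(cu_bitmap[0][0]) = 8 and cu_sh_mask = 0xff, so the low 8 bits of the user's cu_mask land in bits 0-7 of mgmt_se_mask; cu_mask is then shifted right by 8, and for sh = 1 the next 8 bits land in bits 16-23. The popcount thus tells the loop how many request bits each shader array consumes.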
Example #14
/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded.  If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(vcpu->arch.ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && (VCORE_EXIT_COUNT(vc) == 0)) {
		threads_running = VCORE_ENTRY_COUNT(vc);
		threads_ceded = hweight32(vc->napping_threads);
		threads_conferring = hweight32(vc->conferring_threads);
		if (threads_ceded + threads_conferring >= threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
	return rv;
}
Example #15
/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	int numcores;
	u32 mask;

	mask = cpu_mask();
	numcores = hweight32(mask);

	/* Verify if M4 is deactivated */
	if (mask & 0x1)
		numcores--;

	return numcores;
}
Example #16
static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)
{
	unsigned int card_exist;

	card_exist = rtsx_pci_readl(pcr, RTSX_BIPR);
	card_exist &= CARD_EXIST;
	if (!card_exist) {
		/* Enable card CD */
		rtsx_pci_write_register(pcr, CD_PAD_CTL,
				CD_DISABLE_MASK, CD_ENABLE);
		/* Enable card interrupt */
		rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x00);
		return 0;
	}

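	/* More than one card-detect bit set: power up briefly and re-read to deglitch */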
	if (hweight32(card_exist) > 1) {
		rtsx_pci_write_register(pcr, CARD_PWR_CTL,
				BPP_POWER_MASK, BPP_POWER_5_PERCENT_ON);
		msleep(100);

		card_exist = rtsx_pci_readl(pcr, RTSX_BIPR);
		if (card_exist & MS_EXIST)
			card_exist = MS_EXIST;
		else if (card_exist & SD_EXIST)
			card_exist = SD_EXIST;
		else
			card_exist = 0;

		rtsx_pci_write_register(pcr, CARD_PWR_CTL,
				BPP_POWER_MASK, BPP_POWER_OFF);

		dev_dbg(&(pcr->pci->dev),
				"After CD deglitch, card_exist = 0x%x\n",
				card_exist);
	}

	if (card_exist & MS_EXIST) {
		/* Disable SD interrupt */
		rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x40);
		rtsx_pci_write_register(pcr, CD_PAD_CTL,
				CD_DISABLE_MASK, MS_CD_EN_ONLY);
	} else if (card_exist & SD_EXIST) {
		/* Disable MS interrupt */
		rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x80);
		rtsx_pci_write_register(pcr, CD_PAD_CTL,
				CD_DISABLE_MASK, SD_CD_EN_ONLY);
	}

	return card_exist;
}
Example #17
static bool xt_rateest_mt_checkentry(const char *tablename,
				     const void *ip,
				     const struct xt_match *match,
				     void *matchinfo,
				     unsigned int hook_mask)
{
	struct xt_rateest_match_info *info = matchinfo;
	struct xt_rateest *est1, *est2;

	if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |
				     XT_RATEEST_MATCH_REL)) != 1)
		goto err1;

	if (!(info->flags & (XT_RATEEST_MATCH_BPS | XT_RATEEST_MATCH_PPS)))
		goto err1;

	switch (info->mode) {
	case XT_RATEEST_MATCH_EQ:
	case XT_RATEEST_MATCH_LT:
	case XT_RATEEST_MATCH_GT:
		break;
	default:
		goto err1;
	}

	est1 = xt_rateest_lookup(info->name1);
	if (!est1)
		goto err1;

	if (info->flags & XT_RATEEST_MATCH_REL) {
		est2 = xt_rateest_lookup(info->name2);
		if (!est2)
			goto err2;
	} else
		est2 = NULL;


	info->est1 = est1;
	info->est2 = est2;
	return true;

err2:
	xt_rateest_put(est1);
err1:
	return false;
}
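
The first check above is a compact exactly-one-flag test: masking info->flags down to the two mutually exclusive bits and requiring a population count of 1 rejects both "neither set" and "both set" in a single comparison. The same idiom spelled out, as a standalone sketch (names invented for illustration):

#include <stdint.h>

/* Equivalent to hweight32(flags & (a | b)) == 1 when 'a' and 'b' are
 * distinct single-bit masks: true iff exactly one of them is set. */
static int exactly_one(uint32_t flags, uint32_t a, uint32_t b)
{
	return ((flags & a) != 0) ^ ((flags & b) != 0);
}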
Example #18
/**
 * check_mutually_exclusive - Check if new_state violates mutually_exclusive
 *			      condition.
 * @edev:	the extcon device
 * @new_state:	new cable attach status for @edev
 *
 * Returns 0 if nothing violates. Returns the index + 1 for the first
 * violated condition.
 */
static int check_mutually_exclusive(struct extcon_dev *edev, u32 new_state)
{
	int i = 0;

	if (!edev->mutually_exclusive)
		return 0;

	for (i = 0; edev->mutually_exclusive[i]; i++) {
		int weight;
		u32 correspondants = new_state & edev->mutually_exclusive[i];

		/* calculate the total number of bits set */
		weight = hweight32(correspondants);
		if (weight > 1)
			return i + 1;
	}

	return 0;
}
Example #19
static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
{
	struct xt_rateest_match_info *info = par->matchinfo;
	struct xt_rateest *est1, *est2;
	int ret = -EINVAL;

	if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |
				     XT_RATEEST_MATCH_REL)) != 1)
		goto err1;

	if (!(info->flags & (XT_RATEEST_MATCH_BPS | XT_RATEEST_MATCH_PPS)))
		goto err1;

	switch (info->mode) {
	case XT_RATEEST_MATCH_EQ:
	case XT_RATEEST_MATCH_LT:
	case XT_RATEEST_MATCH_GT:
		break;
	default:
		goto err1;
	}

	ret  = -ENOENT;
	est1 = xt_rateest_lookup(info->name1);
	if (!est1)
		goto err1;

	if (info->flags & XT_RATEEST_MATCH_REL) {
		est2 = xt_rateest_lookup(info->name2);
		if (!est2)
			goto err2;
	} else
		est2 = NULL;

	info->est1 = est1;
	info->est2 = est2;
	return 0;

err2:
	xt_rateest_put(est1);
err1:
	return ret;
}
Example #20
static int ux500_msp_dai_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params,
				struct snd_soc_dai *dai)
{
	unsigned int mask, slots_active;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev);

	dev_dbg(dai->dev, "%s: MSP %d (%s): Enter.\n",
			__func__, dai->id, snd_pcm_stream_str(substream));

	switch (drvdata->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		snd_pcm_hw_constraint_minmax(runtime,
				SNDRV_PCM_HW_PARAM_CHANNELS,
				1, 2);
		break;

	case SND_SOC_DAIFMT_DSP_B:
	case SND_SOC_DAIFMT_DSP_A:
		mask = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
			drvdata->tx_mask :
			drvdata->rx_mask;

		slots_active = hweight32(mask);
		dev_dbg(dai->dev, "TDM-slots active: %d", slots_active);

		snd_pcm_hw_constraint_minmax(runtime,
				SNDRV_PCM_HW_PARAM_CHANNELS,
				slots_active, slots_active);
		break;

	default:
		dev_err(dai->dev,
			"%s: Error: Unsupported protocol (fmt = 0x%x)!\n",
			__func__, drvdata->fmt);
		return -EINVAL;
	}

	return 0;
}
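
A worked illustration (mask value invented): a DSP-mode playback stream with drvdata->tx_mask = 0x0000000f has hweight32 = 4 active TDM slots, so the min/max constraint pins the stream's channel count to exactly 4.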
Example #21
static int gm20b_determine_L2_size_bytes(struct gk20a *g)
{
	u32 lts_per_ltc;
	u32 ways;
	u32 sets;
	u32 bytes_per_line;
	u32 active_ltcs;
	u32 cache_size;

	u32 tmp;
	u32 active_sets_value;

	tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_cfg1_r());
	ways = hweight32(ltc_ltc0_lts0_tstg_cfg1_active_ways_v(tmp));

	active_sets_value = ltc_ltc0_lts0_tstg_cfg1_active_sets_v(tmp);
	if (active_sets_value == ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v()) {
		sets = 64;
	} else if (active_sets_value ==
		 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v()) {
		sets = 32;
	} else if (active_sets_value ==
		 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) {
		sets = 16;
	} else {
		dev_err(dev_from_gk20a(g),
			"Unknown constant %u for active sets",
		       (unsigned)active_sets_value);
		sets = 0;
	}

	active_ltcs = g->gr.num_fbps;

	/* chip-specific values */
	lts_per_ltc = 2;
	bytes_per_line = 128;
	cache_size = active_ltcs * lts_per_ltc * ways * sets * bytes_per_line;

	return cache_size;
}
Example #22
int core_to_pos(int nr)
{
	u32 cores = cpu_mask();
	int i, count = 0;

	if (nr == 0) {
		return 0;
	} else if (nr >= hweight32(cores)) {
		puts("Not a valid core number.\n");
		return -1;
	}

	for (i = 1; i < 32; i++) {
		if (is_core_valid(i)) {
			count++;
			if (count == nr)
				break;
		}
	}

	if (count != nr)
		return -1;

	return i;	/* hardware position of the nr-th valid core */
}
Example #23
static int make_even_parity(u32 x)
{
	return hweight32(x) & 1;
}
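
For example, x = 0xb has three set bits, so hweight32(x) & 1 == 1: the returned value is the parity bit that, when appended, makes the total number of set bits even.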
Example #24
static void mlog_reset_format(void)
{
    int len;

    spin_lock_bh(&mlogbuf_lock);
    
    if (meminfo_filter)
        meminfo_filter = M_FILTER_ALL;
    if (vmstat_filter)
        vmstat_filter = V_FILTER_ALL;
    if (buddyinfo_filter)
        buddyinfo_filter = B_FILTER_ALL;
    if (proc_filter)
        proc_filter = P_FILTER_ALL;

    /* calc len */
    len = 4; // id, type, sec, nanosec

    len += hweight32(meminfo_filter);
    len += hweight32(vmstat_filter);

    // buddyinfo
    len += (2 * MAX_ORDER);

    if (proc_filter)
    {
        len++; /* PID */        
        len += hweight32(proc_filter);
    }

    if (!strfmt_list || strfmt_len != len)
    {
        if (strfmt_list)
            kfree(strfmt_list);
        strfmt_list = kmalloc(sizeof(char *)*len, GFP_ATOMIC);
        strfmt_len = len;
        BUG_ON(!strfmt_list);
    }

    /* setup str format */
    len = 0;
    strfmt_proc = 0;
    strfmt_list[len++] = cr_str;
    strfmt_list[len++] = type_str;
    strfmt_list[len++] = time_sec_str;
    strfmt_list[len++] = time_nanosec_str;

    if (meminfo_filter)
    {
        int i;
        for (i = 0; i < hweight32(meminfo_filter); ++i)
            strfmt_list[len++] = mem_size_str;
    }

    if (vmstat_filter)
    {
        int i;
        for (i = 0; i < hweight32(vmstat_filter); ++i)
            strfmt_list[len++] = acc_count_str;
    }

    if (buddyinfo_filter)
    {
        int i, j;
        // normal and high zone
        for (i = 0; i < 2; ++i) {
	        strfmt_list[len++] = order_start_str;
	        for (j = 0; j < MAX_ORDER - 2; ++j) {
	            strfmt_list[len++] = order_middle_str;
	        }
	        strfmt_list[len++] = order_end_str;
        }
    }

    if (proc_filter)
    {
        int i;
        strfmt_proc = len;
        strfmt_list[len++] = pid_str;   /* PID */
        strfmt_list[len++] = adj_str;   /* ADJ */
        for (i = 0; i < hweight32(proc_filter & (P_FMT_SIZE)); ++i)
            strfmt_list[len++] = mem_size_str;
        for (i = 0; i < hweight32(proc_filter & (P_FMT_COUNT)); ++i)
            strfmt_list[len++] = acc_count_str;
    }
    strfmt_idx = 0;

    BUG_ON(len != strfmt_len);
    spin_unlock_bh(&mlogbuf_lock);

    MLOG_PRINTK("[mlog] reset format %d", strfmt_len);
    for (len = 0; len < strfmt_len; ++len)
    {
        MLOG_PRINTK(" %s", strfmt_list[len]);
    }
    MLOG_PRINTK("\n");
}
Example #25
int fsl_layerscape_wake_seconday_cores(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#ifdef CONFIG_FSL_LSCH3
	struct ccsr_reset __iomem *rst = (void *)(CONFIG_SYS_FSL_RST_ADDR);
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_scfg __iomem *scfg = (void *)(CONFIG_SYS_FSL_SCFG_ADDR);
#endif
	u32 cores, cpu_up_mask = 1;
	int i, timeout = 10;
	u64 *table = get_spin_tbl_addr();

#ifdef COUNTER_FREQUENCY_REAL
	/* update for secondary cores */
	__real_cntfrq = COUNTER_FREQUENCY_REAL;
	flush_dcache_range((unsigned long)&__real_cntfrq,
			   (unsigned long)&__real_cntfrq + 8);
#endif

	cores = cpu_mask();
	/* Clear spin table so that secondary processors
	 * observe the correct value after waking up from wfe.
	 */
	memset(table, 0, CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE);
	flush_dcache_range((unsigned long)table,
			   (unsigned long)table +
			   (CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE));

	printf("Waking secondary cores to start from %lx\n", gd->relocaddr);

#ifdef CONFIG_FSL_LSCH3
	gur_out32(&gur->bootlocptrh, (u32)(gd->relocaddr >> 32));
	gur_out32(&gur->bootlocptrl, (u32)gd->relocaddr);
	gur_out32(&gur->scratchrw[6], 1);
	asm volatile("dsb st" : : : "memory");
	rst->brrl = cores;
	asm volatile("dsb st" : : : "memory");
#elif defined(CONFIG_FSL_LSCH2)
	scfg_out32(&scfg->scratchrw[0], (u32)(gd->relocaddr >> 32));
	scfg_out32(&scfg->scratchrw[1], (u32)gd->relocaddr);
	asm volatile("dsb st" : : : "memory");
	gur_out32(&gur->brrl, cores);
	asm volatile("dsb st" : : : "memory");

	/* Bootup online cores */
	scfg_out32(&scfg->corebcr, cores);
#endif
	/* This is needed as a precautionary measure.
	 * If some code before this has accidentally  released the secondary
	 * cores then the pre-bootloader code will trap them in a "wfe" unless
	 * the scratchrw[6] is set. In this case we need a sev here to get these
	 * cores moving again.
	 */
	asm volatile("sev");

	while (timeout--) {
		flush_dcache_range((unsigned long)table, (unsigned long)table +
				   CONFIG_MAX_CPUS * 64);
		for (i = 1; i < CONFIG_MAX_CPUS; i++) {
			if (table[i * WORDS_PER_SPIN_TABLE_ENTRY +
					SPIN_TABLE_ELEM_STATUS_IDX])
				cpu_up_mask |= 1 << i;
		}
		if (hweight32(cpu_up_mask) == hweight32(cores))
			break;
		udelay(10);
	}
	if (timeout <= 0) {
		printf("Not all cores (0x%x) are up (0x%x)\n",
		       cores, cpu_up_mask);
		return 1;
	}
	printf("All (%d) cores are up.\n", hweight32(cores));

	return 0;
}
Example #26
/**
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
 * @dev: DRM device
 * @crtc: intel crtc
 * @crtc_state: incoming crtc_state to validate and setup scalers
 *
 * This function sets up scalers based on staged scaling requests for
 * a @crtc and its planes. It is called from crtc level check path. If request
 * is a supportable request, it attaches scalers to requested planes and crtc.
 *
 * This function takes into account the current scaler(s) in use by any
 * planes that are not part of this atomic state.
 *
 *  Returns:
 *         0 - scalers were set up successfully
 *         error code - otherwise
 */
int intel_atomic_setup_scalers(struct drm_device *dev,
	struct intel_crtc *intel_crtc,
	struct intel_crtc_state *crtc_state)
{
	struct drm_plane *plane = NULL;
	struct intel_plane *intel_plane;
	struct intel_plane_state *plane_state = NULL;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct drm_atomic_state *drm_state = crtc_state->base.state;
	int num_scalers_need;
	int i, j;

	num_scalers_need = hweight32(scaler_state->scaler_users);

	/*
	 * High level flow:
	 * - staged scaler requests are already in scaler_state->scaler_users
	 * - check whether staged scaling requests can be supported
	 * - add planes using scalers that aren't in current transaction
	 * - assign scalers to requested users
	 * - as part of plane commit, scalers will be committed
	 *   (i.e., either attached or detached) to respective planes in hw
	 * - as part of crtc_commit, scaler will be either attached or detached
	 *   to crtc in hw
	 */

	/* fail if required scalers > available scalers */
	if (num_scalers_need > intel_crtc->num_scalers) {
		DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
			num_scalers_need, intel_crtc->num_scalers);
		return -EINVAL;
	}

	/* walk through scaler_users bits and start assigning scalers */
	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
		int *scaler_id;
		const char *name;
		int idx;

		/* skip if scaler not required */
		if (!(scaler_state->scaler_users & (1 << i)))
			continue;

		if (i == SKL_CRTC_INDEX) {
			name = "CRTC";
			idx = intel_crtc->base.base.id;

			/* panel fitter case: assign as a crtc scaler */
			scaler_id = &scaler_state->scaler_id;
		} else {
			name = "PLANE";

			/* plane scaler case: assign as a plane scaler */
			/* find the plane that set the bit as scaler_user */
			plane = drm_state->planes[i];

			/*
			 * to enable/disable hq mode, add planes that are using scaler
			 * into this transaction
			 */
			if (!plane) {
				struct drm_plane_state *state;
				plane = drm_plane_from_index(dev, i);
				state = drm_atomic_get_plane_state(drm_state, plane);
				if (IS_ERR(state)) {
					DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
						plane->base.id);
					return PTR_ERR(state);
				}

				/*
				 * the plane is added after plane checks are run,
				 * but since this plane is unchanged just do the
				 * minimum required validation.
				 */
				crtc_state->base.planes_changed = true;
			}

			intel_plane = to_intel_plane(plane);
			idx = plane->base.id;

			/* plane on different crtc cannot be a scaler user of this crtc */
			if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
				continue;
			}

			plane_state = to_intel_plane_state(drm_state->plane_states[i]);
			scaler_id = &plane_state->scaler_id;
		}

		if (*scaler_id < 0) {
			/* find a free scaler */
			for (j = 0; j < intel_crtc->num_scalers; j++) {
				if (!scaler_state->scalers[j].in_use) {
					scaler_state->scalers[j].in_use = 1;
					*scaler_id = j;
					DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
						intel_crtc->pipe, *scaler_id, name, idx);
					break;
				}
			}
		}

		if (WARN_ON(*scaler_id < 0)) {
			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
			continue;
		}

		/* set scaler mode */
		if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
			/*
			 * when only 1 scaler is in use on either pipe A or B,
			 * scaler 0 operates in high quality (HQ) mode.
			 * In this case use scaler 0 to take advantage of HQ mode
			 */
			*scaler_id = 0;
			scaler_state->scalers[0].in_use = 1;
			scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
			scaler_state->scalers[1].in_use = 0;
		} else {
			scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
		}
	}

	return 0;
}
Example #27
static inline RT_TASK* __task_init(unsigned long name, int prio, int stack_size, int max_msg_size, int cpus_allowed)
{
	void *msg_buf0, *msg_buf1;
	RT_TASK *rt_task;

	if ((rt_task = current->rtai_tskext(TSKEXT0))) {
		if (num_online_cpus() > 1 && cpus_allowed) {
	    		cpus_allowed = hweight32(cpus_allowed) > 1 ? get_min_tasks_cpuid() : ffnz(cpus_allowed);
		} else {
			cpus_allowed = rtai_cpuid();
		}
		put_current_on_cpu(cpus_allowed);
		return rt_task;
	}
	if (rt_get_adr(name)) {
		return 0;
	}
	if (prio > RT_SCHED_LOWEST_PRIORITY) {
		prio = RT_SCHED_LOWEST_PRIORITY;
	}
	if (!max_msg_size) {
		max_msg_size = USRLAND_MAX_MSG_SIZE;
	}
	if (!(msg_buf0 = rt_malloc(max_msg_size))) {
		return 0;
	}
	if (!(msg_buf1 = rt_malloc(max_msg_size))) {
		rt_free(msg_buf0);
		return 0;
	}
	rt_task = rt_malloc(sizeof(RT_TASK) + 3*sizeof(struct fun_args)); 
	if (rt_task) {
	    rt_task->magic = 0;
	    if (num_online_cpus() > 1 && cpus_allowed) {
			cpus_allowed = hweight32(cpus_allowed) > 1 ? get_min_tasks_cpuid() : ffnz(cpus_allowed);
	    } else {
			cpus_allowed = rtai_cpuid();
	    }
	    if (!set_rtext(rt_task, prio, 0, 0, cpus_allowed, 0)) {
	        rt_task->fun_args = (long *)((struct fun_args *)(rt_task + 1));
		rt_task->msg_buf[0] = msg_buf0;
		rt_task->msg_buf[1] = msg_buf1;
		rt_task->max_msg_size[0] =
		rt_task->max_msg_size[1] = max_msg_size;
		if (rt_register(name, rt_task, IS_TASK, 0)) {
			rt_task->state = 0;
#ifdef __IPIPE_FEATURE_ENABLE_NOTIFIER
			ipipe_enable_notifier(current);
#else
			current->flags |= PF_EVNOTIFY;
#endif
#if (defined VM_PINNED) && (defined CONFIG_MMU)
			ipipe_disable_ondemand_mappings(current);
#endif
			RTAI_OOM_DISABLE();

			return rt_task;
		} else {
			clr_rtext(rt_task);
		}
	    }
	    rt_free(rt_task);
	}
	rt_free(msg_buf0);
	rt_free(msg_buf1);
	return 0;
}
Example #28
static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
{
	struct nlm_soc_info *nodep;
	uint64_t syspcibase, fusebase;
	uint32_t syscoremask, mask, fusemask;
	int core, n, cpu;

	for (n = 0; n < NLM_NR_NODES; n++) {
		if (n != 0) {
			/* check if node exists and is online */
			if (cpu_is_xlp9xx()) {
				int b = xlp9xx_get_socbus(n);
				pr_info("Node %d SoC PCI bus %d.\n", n, b);
				if (b == 0)
					break;
			} else {
				syspcibase = nlm_get_sys_pcibase(n);
				if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
					break;
			}
			nlm_node_init(n);
		}

		/* read cores in reset from SYS */
		nodep = nlm_get_node(n);

		if (cpu_is_xlp9xx()) {
			fusebase = nlm_get_fuse_regbase(n);
			fusemask = nlm_read_reg(fusebase, FUSE_9XX_DEVCFG6);
			switch (read_c0_prid() & PRID_IMP_MASK) {
			case PRID_IMP_NETLOGIC_XLP5XX:
				mask = 0xff;
				break;
			case PRID_IMP_NETLOGIC_XLP9XX:
			default:
				mask = 0xfffff;
				break;
			}
		} else {
			fusemask = nlm_read_sys_reg(nodep->sysbase,
						SYS_EFUSE_DEVICE_CFG_STATUS0);
			switch (read_c0_prid() & PRID_IMP_MASK) {
			case PRID_IMP_NETLOGIC_XLP3XX:
				mask = 0xf;
				break;
			case PRID_IMP_NETLOGIC_XLP2XX:
				mask = 0x3;
				break;
			case PRID_IMP_NETLOGIC_XLP8XX:
			default:
				mask = 0xff;
				break;
			}
		}

		/*
		 * Fused out cores are set in the fusemask, and the remaining
		 * cores are renumbered to range 0 .. nactive-1
		 */
		syscoremask = (1 << hweight32(~fusemask & mask)) - 1;

		pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask);
		for (core = 0; core < nlm_cores_per_node(); core++) {
			/* we will be on node 0 core 0 */
			if (n == 0 && core == 0)
				continue;

			/* see if the core exists */
			if ((syscoremask & (1 << core)) == 0)
				continue;

			/* see if at least the first hw thread is enabled */
			cpu = (n * nlm_cores_per_node() + core)
						* NLM_THREADS_PER_CORE;
			if (!cpumask_test_cpu(cpu, wakeup_mask))
				continue;

			/* wake up the core */
			if (!xlp_wakeup_core(nodep->sysbase, n, core))
				continue;

			/* core is up */
			nodep->coremask |= 1u << core;

			/* spin until the hw threads sets their ready */
			if (!wait_for_cpus(cpu, 0))
				pr_err("Node %d : timeout core %d\n", n, core);
		}
	}
}
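
A worked illustration of the renumbering (fuse values invented for exposition): with mask = 0xff and fusemask = 0x0c (cores 2 and 3 fused out), ~fusemask & mask = 0xf3 and hweight32(0xf3) = 6, so syscoremask = (1 << 6) - 1 = 0x3f: the six working cores appear renumbered into the contiguous range 0..5, as the comment above the assignment describes.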
Example #29
static int aic33_update(int flag, int val)
{
	u16 volume;
	s16 left_gain, left_val, right_gain, right_val;

	switch (flag) {
	case SET_VOLUME:
		/* Ignore separate left/right channel for now,
		   even though the codec does support it. */
		val &= 0xff;

		if (val < 0 || val > 100) {
			DPRINTK("Trying a bad volume value(%d)!\n", val);
			return -EPERM;
		}
		// Convert 0 -> 100 volume to 0x77 (LHV_MIN) -> 0x00 (LHV_MAX)
		volume =
		    ((val * OUTPUT_VOLUME_RANGE) / 100) + OUTPUT_VOLUME_MIN;

		aic33_local.volume_reg = OUTPUT_VOLUME_MAX - volume;

		if (aic33_local.nochan == STEREO) {
			audio_aic33_write(47, LOPM_ON | aic33_local.volume_reg);
			audio_aic33_write(64, LOPM_ON | aic33_local.volume_reg);
			audio_aic33_write(82, LOPM_ON | aic33_local.volume_reg);
			audio_aic33_write(92, LOPM_ON | aic33_local.volume_reg);
		} else if (aic33_local.nochan == MONO) {
#ifdef CONFIG_MONOSTEREO_DIFFJACK
			/* DACL1 to MONO_LOP/M routing and volume control */
			audio_aic33_write(75, LOPM_ON | aic33_local.volume_reg);

			/* DACR1 to MONO_LOP/M routing and volume control */
			audio_aic33_write(78, LOPM_ON | aic33_local.volume_reg);
#else
			audio_aic33_write(47, LOPM_ON | aic33_local.volume_reg);
			audio_aic33_write(64, LOPM_ON | aic33_local.volume_reg);
			audio_aic33_write(82, LOPM_ON | aic33_local.volume_reg);
			audio_aic33_write(92, LOPM_ON | aic33_local.volume_reg);
#endif
		}

		break;

	case SET_LINE:
	case SET_MIC:
		/* Ignore separate left/right channel for now,
		   even though the codec does support it. */
		val &= 0xff;

		if (val < 0 || val > 100) {
			DPRINTK("Trying a bad volume value(%d)!\n", val);
			return -EPERM;
		}

		volume = ((val * INPUT_VOLUME_RANGE) / 100) + INPUT_VOLUME_MIN;

		aic33_local.input_volume_reg = volume;

		audio_aic33_write(15, aic33_local.input_volume_reg);
		audio_aic33_write(16, aic33_local.input_volume_reg);

		break;

	case SET_RECSRC:
		/* Ignore separate left/right channel for now,
		   even though the codec does support it. */
		val &= 0xff;

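		/* more than one source bit set: keep only the newly requested one */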
		if (hweight32(val) > 1)
			val &= ~aic33_local.recsrc;

		if (val == SOUND_MASK_MIC) {
			/* enable the mic input */
			DPRINTK("Enabling mic\n");
			audio_aic33_write(17, 0x0);
			audio_aic33_write(18, 0x0);

			/* enable ADC's and disable the line input*/
			audio_aic33_write(19, 0x7C);
			audio_aic33_write(22, 0x7C);

		}
		else if (val == SOUND_MASK_LINE) {
			/* enable ADC's, enable line input */
			DPRINTK(" Enabling line in\n");
			audio_aic33_write(19, 0x4);
			audio_aic33_write(22, 0x4);

			/* disable the mic input */
			audio_aic33_write(17, 0xff);
			audio_aic33_write(18, 0xff);
		}
		else {
			/* do nothing */
		}
		aic33_local.recsrc = val;
		break;

	case SET_IGAIN:
		left_val = val & 0xFF;
		right_val = val >> 8;

		if (left_val < 0 || left_val > 100) {
			DPRINTK("Trying a bad igain value(%d)!\n", left_val);
			return -EPERM;
		}
		if (right_val < 0 || right_val > 100) {
			DPRINTK("Trying a bad igain value(%d)!\n", right_val);
			return -EPERM;
		}

		left_gain = ((left_val * INPUT_GAIN_RANGE) / 100) + INPUT_GAIN_MIN;
		right_gain = ((right_val * INPUT_GAIN_RANGE) / 100) + INPUT_GAIN_MIN;

		DPRINTK("left gain reg val = 0x%x", left_gain << 1);
		DPRINTK("right gain reg val = 0x%x", left_gain << 1);

		/* Left AGC control */
		audio_aic33_write(26, 0x80);
		audio_aic33_write(27, left_gain << 1);
		audio_aic33_write(28, 0x0);

		/* Right AGC control */
		audio_aic33_write(29, 0x80);
		audio_aic33_write(30, right_gain << 1);
		audio_aic33_write(31, 0x0);

		break;

	case SET_OGAIN:
		left_val = val & 0xFF;
		right_val = val >> 8;

		if (left_val < 0 || left_val > 100) {
			DPRINTK("Trying a bad igain value(%d)!\n", left_val);
			return -EPERM;
		}
		if (right_val < 0 || right_val > 100) {
			DPRINTK("Trying a bad igain value(%d)!\n", right_val);
			return -EPERM;
		}

		left_gain = ((left_val * OUTPUT_GAIN_RANGE) / 100) + OUTPUT_GAIN_MIN;
		left_gain = OUTPUT_GAIN_MAX - left_gain;
		right_gain = ((right_val * OUTPUT_GAIN_RANGE) / 100) + OUTPUT_GAIN_MIN;
		right_gain = OUTPUT_GAIN_MAX - right_gain;

		/* Left/Right DAC digital volume gain */
		audio_aic33_write(43, left_gain);
		audio_aic33_write(44, right_gain);
		break;

	case SET_MICBIAS:
		/* Ignore separate left/right channel for now,
		   even though the codec does support it. */
		val &= 0xff;

		if (val < 0 || val > 3) {
			DPRINTK
			    ("Request for non supported mic bias level(%d)!\n",
			     val);
			return -EPERM;
		}

		if (val == 0)
			audio_aic33_write(25, 0x00);

		else if (val == 1)
			audio_aic33_write(25, MICBIAS_OUTPUT_2_0V);

		else if (val == 2)
			audio_aic33_write(25, MICBIAS_OUTPUT_2_5V);

		else if (val == 3)
			audio_aic33_write(25, MICBIAS_OUTPUT_AVDD);

		break;

	case SET_BASS:
		break;

	case SET_TREBLE:
		break;
	}
	return 0;
}
Example #30
/*
 * Until a deeper rework of what the job scheduler, power manager and
 * affinity manager will implement has been decided, this function is
 * just an intermediate step that assumes:
 * - all working cores will be powered on when this is called.
 * - the largest current configuration is 2 core groups.
 * - It has been decided not to have hardcoded values, so the low
 *   and high cores in a core split will be evenly distributed.
 * - Odd combinations of core requirements have been filtered out
 *   and do not get to this function (e.g. CS+T+NSS is not
 *   supported here).
 * - This function is frequently called and can be optimized
 *   (see notes in loops), but as the functionality will likely
 *   be modified, optimization has not been addressed.
 */
mali_bool kbase_js_choose_affinity(u64 * const affinity, kbase_device *kbdev, kbase_jd_atom *katom, int js)
{
	base_jd_core_req core_req = katom->core_req;
	unsigned int num_core_groups = kbdev->gpu_props.num_core_groups;
	u64 core_availability_mask;
	unsigned long flags;

	spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);

	core_availability_mask = kbase_pm_ca_get_core_mask(kbdev);

	/*
	 * If no cores are currently available (core availability policy is
	 * transitioning) then fail.
	 */
	if (0 == core_availability_mask)
	{
		spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
		*affinity = 0;
		return MALI_FALSE;
	}

	KBASE_DEBUG_ASSERT(js >= 0);

	if ((core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) == BASE_JD_REQ_T)
	{
		spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
		/* Tiler only job, bit 0 needed to enable tiler but no shader cores required */
		*affinity = 1;
		return MALI_TRUE;
	}

	if (1 == kbdev->gpu_props.num_cores) {
		/* trivial case only one core, nothing to do */
		*affinity = core_availability_mask;
	} else if (kbase_affinity_requires_split(kbdev) == MALI_FALSE) {
		if ((core_req & (BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP))) {
			if (js == 0 || num_core_groups == 1) {
				/* js[0] and single-core-group systems just get the first core group */
				*affinity = kbdev->gpu_props.props.coherency_info.group[0].core_mask & core_availability_mask;
			} else {
				/* js[1], js[2] use core groups 0, 1 for dual-core-group systems */
				u32 core_group_idx = ((u32) js) - 1;
				KBASE_DEBUG_ASSERT(core_group_idx < num_core_groups);
				*affinity = kbdev->gpu_props.props.coherency_info.group[core_group_idx].core_mask & core_availability_mask;

				/* If the job is specifically targeting core group 1 and the core
				 * availability policy is keeping that core group off, then fail */
				if (*affinity == 0 && core_group_idx == 1 && kbdev->pm.cg1_disabled == MALI_TRUE)
					katom->event_code = BASE_JD_EVENT_PM_EVENT;
			}
		} else {
			/* All cores are available when no core split is required */
			*affinity = core_availability_mask;
		}
	} else {
		/* Core split required - divide cores in two non-overlapping groups */
		u64 low_bitmap, high_bitmap;
		int n_high_cores = kbdev->gpu_props.num_cores >> 1;
		KBASE_DEBUG_ASSERT(1 == num_core_groups);
		KBASE_DEBUG_ASSERT(0 != n_high_cores);

		/* compute the reserved high cores bitmap */
		high_bitmap = ~0;
		/* note: this can take a while, optimization desirable */
		while (n_high_cores != hweight32(high_bitmap & kbdev->shader_present_bitmap))
			high_bitmap = high_bitmap << 1;

		high_bitmap &= core_availability_mask;
		low_bitmap = core_availability_mask ^ high_bitmap;

		if (affinity_job_uses_high_cores(kbdev, katom))
			*affinity = high_bitmap;
		else
			*affinity = low_bitmap;
	}

	spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);

	/*
	 * If no cores are currently available in the desired core group(s)
	 * (core availability policy is transitioning) then fail.
	 */
	if (*affinity == 0)
		return MALI_FALSE;

	/* Enable core 0 if tiler required */
	if (core_req & BASE_JD_REQ_T)
		*affinity = *affinity | 1;

	return MALI_TRUE;
}
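
A worked illustration of the core-split loop above (a 4-core mask invented for exposition): with kbdev->shader_present_bitmap = 0xf and n_high_cores = 2, high_bitmap starts at ~0 and is shifted left until only two of the present cores remain covered, i.e. ...11111100. Masked with core_availability_mask this reserves cores 2-3 as the high group, and the XOR leaves cores 0-1 as the low group.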