Example no. 1
unsigned long long pgm_modulate(pgs_signal_t** _modulated_signals,
		pgs_block_t* _premodulated_blocks,
		unsigned long long _premodulated_blocks_count,
		unsigned int _modulation)
{
	/* Nothing to modulate without input blocks. */
	if (unlikely(_premodulated_blocks == NULL))
		return 0;

	/* Allocate one output signal per pre-modulated block; the caller
	 * receives the array through *_modulated_signals. */
	*_modulated_signals = pgt_alloc(_premodulated_blocks_count, sizeof(pgs_signal_t));

	/* Blocks are independent, so modulate them in parallel when OpenMP is available. */
#if defined (_OPENMP)
#pragma omp parallel for
#endif
	for (unsigned long long i = 0; i < _premodulated_blocks_count; i++)
		pgm_modulate_block(&(*_modulated_signals)[i], &_premodulated_blocks[i], _modulation);

	return _premodulated_blocks_count;
}
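
For reference, a hypothetical caller could look like the sketch below. It assumes the project's (unspecified) headers declare pgs_block_t and pgs_signal_t; the MODULATION_BPSK constant and the pgt_free() cleanup helper are placeholders assumed purely for illustration and do not appear in the listing above.

/* Hypothetical caller sketch: MODULATION_BPSK and pgt_free() are assumed
 * for illustration only; they are not taken from the original listing. */
static void modulate_all(pgs_block_t *blocks, unsigned long long block_count)
{
	pgs_signal_t *signals = NULL;
	unsigned long long count;

	count = pgm_modulate(&signals, blocks, block_count, MODULATION_BPSK);
	if (count == 0)
		return;		/* no input blocks, nothing was allocated */

	/* ... consume signals[0 .. count - 1] ... */

	pgt_free(signals);	/* assumed cleanup counterpart of pgt_alloc() */
}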
Example no. 2
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
				struct user_ta_ctx *utc)
{
	struct core_mmu_table_info pg_info;
	struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache;
	struct pgt *pgt;
	size_t n;

	if (!utc->mmu->size)
		return;	/* Nothing to map */

	/* Find the last valid entry */
	n = utc->mmu->size;
	while (true) {
		n--;
		if (utc->mmu->table[n].size)
			break;
		if (!n)
			return;	/* Nothing to map */
	}

	/*
	 * Allocate all page tables in advance.
	 */
	pgt_alloc(pgt_cache, &utc->ctx, utc->mmu->table[0].va,
		  utc->mmu->table[n].va + utc->mmu->table[n].size - 1);
	pgt = SLIST_FIRST(pgt_cache);

	/* Set up a table info describing the next (page) translation level. */
	core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);

	/* Populate page table entries for every region that is actually in use. */
	for (n = 0; n < utc->mmu->size; n++) {
		if (!utc->mmu->table[n].size)
			continue;
		set_pg_region(dir_info, utc->mmu->table + n, &pgt, &pg_info);
	}
}
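
The only non-obvious step above is the backward scan that locates the last populated entry before allocating page tables. A minimal, self-contained sketch of the same idiom on a plain array follows; the function and parameter names are illustrative and not part of the code above.

#include <stdbool.h>
#include <stddef.h>

/* Same idiom as the "find the last valid entry" loop above: scan backwards
 * for the last non-zero size, returning false when every entry is empty. */
static bool find_last_used(const size_t *sizes, size_t count, size_t *idx)
{
	size_t n = count;

	if (!count)
		return false;	/* nothing to map */
	while (true) {
		n--;
		if (sizes[n]) {
			*idx = n;
			return true;
		}
		if (!n)
			return false;	/* all entries empty */
	}
}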