Example no. 1
/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
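
The two boundaries above are deliberately rounded in opposite directions, so the unmapped range contracts inward and never touches a partial page outside the hole. A minimal standalone sketch of that arithmetic, with local stand-ins for the kernel's power-of-two rounding macros and made-up offsets:

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the kernel's power-of-two rounding helpers. */
#define ROUND_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))
#define ROUND_UP(x, a)   ROUND_DOWN((x) + (a) - 1, (a))

int main(void)
{
	const uint64_t PAGE_SZ = 4096;
	uint64_t lstart = 5000, lend = 20000;	/* hypothetical hole */

	uint64_t unmap_start = ROUND_UP(lstart, PAGE_SZ);		/* 8192 */
	uint64_t unmap_end = ROUND_DOWN(1 + lend, PAGE_SZ) - 1;	/* 16383 */

	/* Only pages lying entirely inside [lstart, lend] get unmapped. */
	printf("unmap [%llu, %llu]\n",
	       (unsigned long long)unmap_start, (unsigned long long)unmap_end);
	return 0;
}

With a 4096-byte page, a hole of [5000, 20000] only unmaps the fully covered pages [8192, 16383]; truncate_inode_pages_range still handles the partial pages at the edges.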
Example no. 2
/**
 *	prandom_bytes - get the requested number of pseudo-random bytes
 *	@buf: where to copy the pseudo-random bytes to
 *	@bytes: the requested number of bytes
 */
void prandom_bytes(void *buf, int bytes)
{
	unsigned char *p = buf;
	int i;

	for (i = 0; i < round_down(bytes, sizeof(u32)); i += sizeof(u32)) {
		u32 random = random32();
		int j;

		for (j = 0; j < sizeof(u32); j++) {
			p[i + j] = random;
			random >>= BITS_PER_BYTE;
		}
	}

	if (i < bytes) {
		u32 random = random32();

		for (; i < bytes; i++) {
			p[i] = random;
			random >>= BITS_PER_BYTE;
		}
	}
}
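
The word loop above fills the round_down(bytes, sizeof(u32)) prefix four bytes at a time and the tail loop finishes the remainder. A small sketch of the same split, with a constant standing in for random32() purely so the byte placement is visible (fill_bytes is a hypothetical helper, not part of the original code):

#include <stdio.h>
#include <stdint.h>

/* Same split as above, with a constant in place of random32():
 * whole 32-bit words first, then the unaligned tail byte by byte. */
static void fill_bytes(unsigned char *p, int bytes, uint32_t word)
{
	int i = 0, j;

	for (; i + (int)sizeof(uint32_t) <= bytes; i += sizeof(uint32_t))
		for (j = 0; j < (int)sizeof(uint32_t); j++)
			p[i + j] = (unsigned char)(word >> (8 * j));

	for (j = 0; i < bytes; i++, j++)
		p[i] = (unsigned char)(word >> (8 * j));
}

int main(void)
{
	unsigned char buf[7];
	int i;

	fill_bytes(buf, (int)sizeof(buf), 0x04030201u);
	for (i = 0; i < (int)sizeof(buf); i++)
		printf("%02x ", buf[i]);	/* 01 02 03 04 01 02 03 */
	printf("\n");
	return 0;
}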
Example no. 3
/*
 * Adds the specified range to what will become the new identity mappings.
 * Once all ranges have been added, the new mapping is activated by calling
 * finalize_identity_maps() below.
 */
void add_identity_map(unsigned long start, unsigned long size)
{
	struct x86_mapping_info mapping_info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.context	= &pgt_data,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
	};
	unsigned long end = start + size;

	/* Make sure we have a top level page table ready to use. */
	if (!level4p)
		prepare_level4();

	/* Align boundary to 2M. */
	start = round_down(start, PMD_SIZE);
	end = round_up(end, PMD_SIZE);
	if (start >= end)
		return;

	/* Build the mapping. */
	kernel_ident_mapping_init(&mapping_info, (pgd_t *)level4p,
				  start, end);
}
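
Here the range is expanded outward: the start is rounded down and the end rounded up to PMD_SIZE so the whole request is covered by 2M pages. A tiny sketch of that expansion, using local stand-ins for the rounding macros and an arbitrary unaligned range:

#include <stdio.h>
#include <stdint.h>

#define PMD_2M 0x200000ULL
#define ROUND_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))
#define ROUND_UP(x, a)   ROUND_DOWN((x) + (a) - 1, (a))

int main(void)
{
	/* Hypothetical request that is not 2M aligned. */
	uint64_t start = 0x1234567, size = 0x1000;
	uint64_t end = start + size;

	/* Expand outward so the whole request sits on 2M boundaries. */
	start = ROUND_DOWN(start, PMD_2M);	/* 0x1200000 */
	end = ROUND_UP(end, PMD_2M);		/* 0x1400000 */

	printf("map [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}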
Example no. 4
static void print_shadow_for_address(const void *addr)
{
	int i;
	const void *shadow = kasan_mem_to_shadow(addr);
	const void *shadow_row;

	shadow_row = (void *)round_down((unsigned long)shadow,
					SHADOW_BYTES_PER_ROW)
		- SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW;

	pr_err("Memory state around the buggy address:\n");

	for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) {
		const void *kaddr = kasan_shadow_to_mem(shadow_row);
		char buffer[4 + (BITS_PER_LONG/8)*2];
		char shadow_buf[SHADOW_BYTES_PER_ROW];

		snprintf(buffer, sizeof(buffer),
			(i == 0) ? ">%p: " : " %p: ", kaddr);
		/*
		 * We should not pass a shadow pointer to generic
		 * function, because generic functions may try to
		 * access kasan mapping for the passed address.
		 */
		memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW);
		print_hex_dump(KERN_ERR, buffer,
			DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1,
			shadow_buf, SHADOW_BYTES_PER_ROW, 0);

		if (row_is_guilty(shadow_row, shadow))
			pr_err("%*c\n",
				shadow_pointer_offset(shadow_row, shadow),
				'^');

		shadow_row += SHADOW_BYTES_PER_ROW;
	}
}
Example no. 5
static int chacha_stream_xor(struct skcipher_request *req,
			     struct chacha_ctx *ctx, u8 *iv)
{
	struct skcipher_walk walk;
	u32 state[16];
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	crypto_chacha_init(state, ctx, iv);

	while (walk.nbytes > 0) {
		unsigned int nbytes = walk.nbytes;

		if (nbytes < walk.total)
			nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);

		chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
			       nbytes, ctx->nrounds);
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}
Example no. 6
/*
 * This is a common helper function for find_next_bit and
 * find_next_zero_bit.  The difference is the "invert" argument, which
 * is XORed with each fetched word before searching it for one bits.
 */
static unsigned long _find_next_bit(const unsigned long *addr,
		unsigned long nbits, unsigned long start, unsigned long invert)
{
	unsigned long tmp;

	if (!nbits || start >= nbits)
		return nbits;

	tmp = addr[start / BITS_PER_LONG] ^ invert;

	/* Handle 1st word. */
	tmp &= BITMAP_FIRST_WORD_MASK(start);
	start = round_down(start, BITS_PER_LONG);

	while (!tmp) {
		start += BITS_PER_LONG;
		if (start >= nbits)
			return nbits;

		tmp = addr[start / BITS_PER_LONG] ^ invert;
	}

	return min(start + __ffs(tmp), nbits);
}
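
The first word is masked so only bits at or above start survive, start is snapped back to its word boundary, and the word walk plus __ffs() then yields an absolute bit index. A self-contained sketch of the same structure, assuming 64-bit words and using the GCC/Clang __builtin_ctzll in place of __ffs (find_next_set and FIRST_WORD_MASK are illustrative names, not the kernel's):

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_WORD 64
/* Keep only bits at positions >= start within the first word. */
#define FIRST_WORD_MASK(start) (~0ULL << ((start) % BITS_PER_WORD))

static unsigned long find_next_set(const uint64_t *addr, unsigned long nbits,
				   unsigned long start)
{
	uint64_t tmp;

	if (start >= nbits)
		return nbits;

	tmp = addr[start / BITS_PER_WORD] & FIRST_WORD_MASK(start);
	start -= start % BITS_PER_WORD;		/* round down to a word */

	while (!tmp) {
		start += BITS_PER_WORD;
		if (start >= nbits)
			return nbits;
		tmp = addr[start / BITS_PER_WORD];
	}

	/* __builtin_ctzll plays the role of __ffs() here. */
	start += (unsigned long)__builtin_ctzll(tmp);
	return start < nbits ? start : nbits;
}

int main(void)
{
	uint64_t map[2] = { 1ULL << 3, 1ULL << 5 };	/* bits 3 and 69 set */

	printf("%lu %lu %lu\n",
	       find_next_set(map, 128, 0),	/* 3 */
	       find_next_set(map, 128, 4),	/* 69 */
	       find_next_set(map, 128, 70));	/* 128: none found */
	return 0;
}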
Example no. 7
/* 
 * Free bootmem based on the e820 table for a node.
 */
void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
{
	int i;
	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i]; 
		unsigned long last, addr;

		if (ei->type != E820_RAM || 
		    ei->addr+ei->size <= start || 
		    ei->addr >= end)
			continue;

		addr = round_up(ei->addr, PAGE_SIZE);
		if (addr < start) 
			addr = start;

		last = round_down(ei->addr + ei->size, PAGE_SIZE); 
		if (last >= end)
			last = end; 

		if (last > addr && last-addr >= PAGE_SIZE)
			free_bootmem_node(pgdat, addr, last-addr);
	}
}
Example no. 8
struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 *
	 * This check is also necessary for ensuring page poisoning
	 * works as expected when enabled
	 */
	if (unlikely(!base))
		return NULL;
#endif
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}
Example no. 9
static bool ccu_data_offsets_valid(struct ccu_data *ccu)
{
	struct ccu_policy *ccu_policy = &ccu->policy;
	u32 limit;

	limit = ccu->range - sizeof(u32);
	limit = round_down(limit, sizeof(u32));
	if (ccu_policy_exists(ccu_policy)) {
		if (ccu_policy->enable.offset > limit) {
			pr_err("%s: bad policy enable offset for %s "
					"(%u > %u)\n", __func__,
				ccu->name, ccu_policy->enable.offset, limit);
			return false;
		}
		if (ccu_policy->control.offset > limit) {
			pr_err("%s: bad policy control offset for %s "
					"(%u > %u)\n", __func__,
				ccu->name, ccu_policy->control.offset, limit);
			return false;
		}
	}

	return true;
}
Example no. 10
/*
 * afu_eb_read:
 * Called from sysfs and reads the afu error info buffer. The h/w only supports
 * 4/8-byte aligned access, so if the requested offset/count aren't 8-byte
 * aligned, the function uses a bounce buffer, which can be at most PAGE_SIZE.
 */
ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
				loff_t off, size_t count)
{
	loff_t aligned_start, aligned_end;
	size_t aligned_length;
	void *tbuf;
	const void __iomem *ebuf = afu->afu_desc_mmio + afu->eb_offset;

	if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
		return 0;

	/* calculate aligned read window */
	count = min((size_t)(afu->eb_len - off), count);
	aligned_start = round_down(off, 8);
	aligned_end = round_up(off + count, 8);
	aligned_length = aligned_end - aligned_start;

	/* max we can copy in one read is PAGE_SIZE */
	if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
		aligned_length = ERR_BUFF_MAX_COPY_SIZE;
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	}

	/* use bounce buffer for copy */
	tbuf = (void *)__get_free_page(GFP_TEMPORARY);
	if (!tbuf)
		return -ENOMEM;

	/* perform aligned read from the mmio region */
	memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
	memcpy(buf, tbuf + (off & 0x7), count);

	free_page((unsigned long)tbuf);

	return count;
}
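
The read window is widened to 8-byte boundaries in both directions, the aligned span is copied through the bounce buffer, and the caller's data is then taken (off & 0x7) bytes into that copy. A short sketch of just the window arithmetic, with made-up offset and count and local stand-ins for the rounding macros:

#include <stdio.h>
#include <stdint.h>

#define ROUND_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))
#define ROUND_UP(x, a)   ROUND_DOWN((x) + (a) - 1, (a))

int main(void)
{
	/* Hypothetical unaligned request into the error buffer. */
	uint64_t off = 13, count = 10;

	uint64_t aligned_start = ROUND_DOWN(off, 8);		/* 8 */
	uint64_t aligned_end = ROUND_UP(off + count, 8);	/* 24 */
	uint64_t aligned_length = aligned_end - aligned_start;	/* 16 */

	/* The caller's data begins (off & 0x7) bytes into the bounce copy. */
	printf("read [%llu, %llu) len %llu, user data at +%llu\n",
	       (unsigned long long)aligned_start,
	       (unsigned long long)aligned_end,
	       (unsigned long long)aligned_length,
	       (unsigned long long)(off & 0x7));
	return 0;
}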
Example no. 11
/* Detect OLPC (One Laptop per Child) Open Firmware */
void __init olpc_ofw_detect(void)
{
	struct olpc_ofw_header *hdr = &boot_params.olpc_ofw_header;
	unsigned long start;

	/* ensure OFW booted us by checking for "OFW " string */
	if (hdr->ofw_magic != OLPC_OFW_SIG)
		return;

	olpc_ofw_cif = (int (*)(int *))hdr->cif_handler;

	if ((unsigned long)olpc_ofw_cif < OFW_MIN) {
		printk(KERN_ERR "OFW detected, but cif has invalid address 0x%lx - disabling.\n",
				(unsigned long)olpc_ofw_cif);
		olpc_ofw_cif = NULL;
		return;
	}

	/* determine where OFW starts in memory */
	start = round_down((unsigned long)olpc_ofw_cif, OFW_BOUND);
	printk(KERN_INFO "OFW detected in memory, cif @ 0x%lx (reserving top %ldMB)\n",
			(unsigned long)olpc_ofw_cif, (-start) >> 20);
	reserve_top_address(-start);
}
Example no. 12
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
	kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we unmap the early shadow (the clear_pgds() call below).
	 * However, instrumented code can't execute without shadow memory,
	 * so tmp_pg_dir is used to keep the early shadow mapped until the
	 * full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
			 pfn_to_nid(virt_to_pfn(_text)));

	/*
	 * vmemmap_populate() has populated the shadow region that covers the
	 * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
	 * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
	 * kasan_populate_zero_shadow() from replacing the page table entries
	 * (PMD or PTE) at the edges of the shadow region for the kernel
	 * image.
	 */
	kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
	kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
					   (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * end + 1 here is intentional. We check several shadow bytes in
		 * advance to slightly speed up fastpath. In some rare cases
		 * we could cross boundary of mapped shadow, so we just map
		 * some more here.
		 */
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				(unsigned long)kasan_mem_to_shadow(end) + 1,
				pfn_to_nid(virt_to_pfn(start)));
	}
Example no. 13
template <  typename IN_PORT_TYPE > int file_descriptor_sink_i_base::_forecastAndProcess( bool &eos, typename  std::vector< gr_istream< IN_PORT_TYPE > > &istreams )
{
    typedef typename std::vector< gr_istream< IN_PORT_TYPE > >   _IStreamList;

    typename _IStreamList::iterator istream = istreams.begin();
    int nout = 0;
    bool dataReady = false;
    if ( !eos ) {
        uint64_t max_items_avail = 0;
        for ( int idx=0 ; istream != istreams.end() && serviceThread->threadRunning() ; idx++, istream++ ) {
            LOG_TRACE( file_descriptor_sink_i_base, "GET MAX ITEMS: STREAM:" << idx << " NITEMS/SCALARS:"
                      << istream->nitems() << "/" << istream->_data.size() );
            max_items_avail = std::max( istream->nitems(), max_items_avail );
        }

        //
        // calc number of output items to produce
        //
        noutput_items = (int) (max_items_avail * gr_sptr->relative_rate ());
        noutput_items = round_down (noutput_items, gr_sptr->output_multiple ());

        if ( noutput_items <= 0  ) {
           LOG_TRACE( file_descriptor_sink_i_base, "DATA CHECK - MAX ITEMS  NOUTPUT/MAX_ITEMS:" <<   noutput_items << "/" << max_items_avail);
           return -1;
        }

        if ( gr_sptr->fixed_rate() ) {
            istream = istreams.begin();
            for ( int i=0; istream != istreams.end(); i++, istream++ ) {
                int t_noutput_items = gr_sptr->fixed_rate_ninput_to_noutput( istream->nitems() );
                if ( gr_sptr->output_multiple_set() ) {
                    t_noutput_items = round_up(t_noutput_items, gr_sptr->output_multiple());
                }
                if ( t_noutput_items > 0 ) {
                    if ( noutput_items == 0 ) {
                        noutput_items = t_noutput_items;
                    }
                    if ( t_noutput_items <= noutput_items ) {
                        noutput_items = t_noutput_items;
                    }
                }
            }
            LOG_TRACE( file_descriptor_sink_i_base, " FIXED FORECAST NOUTPUT/output_multiple == " 
                      << noutput_items  << "/" << gr_sptr->output_multiple());
        }

        //
        // ask the block how much input they need to produce noutput_items...
        // if enough data is available to process then set the dataReady flag
        //
        int32_t  outMultiple = gr_sptr->output_multiple();
        while ( !dataReady && noutput_items >= outMultiple  ) {
            //
            // ask the block how much input they need to produce noutput_items...
            //
            gr_sptr->forecast(noutput_items, _ninput_items_required);

            LOG_TRACE( file_descriptor_sink_i_base, "--> FORECAST IN/OUT " << _ninput_items_required[0]  << "/" << noutput_items  );

            istream = istreams.begin();
            uint32_t dr_cnt=0;
            for ( int idx=0 ; noutput_items > 0 && istream != istreams.end(); idx++, istream++ ) {
                // check if buffer has enough elements
                _input_ready[idx] = false;
                if ( istream->nitems() >= (uint64_t)_ninput_items_required[idx] ) {
                    _input_ready[idx] = true;
                    dr_cnt++;
                }
                LOG_TRACE( file_descriptor_sink_i_base, "ISTREAM DATACHECK NELMS/NITEMS/REQ/READY:" << 
                          istream->nelems() << "/" << istream->nitems() << "/" << 
                          _ninput_items_required[idx] << "/" << _input_ready[idx]);
            }
    
            if ( dr_cnt < istreams.size() ) {
                if ( outMultiple > 1 ) {
                    noutput_items -= outMultiple;
                } else {
                    noutput_items /= 2;
                }
            } else {
                dataReady = true;
            }
            LOG_TRACE( file_descriptor_sink_i_base, " TRIM FORECAST NOUTPUT/READY " << noutput_items << "/" << dataReady );
        }

        // check if data is ready...
        if ( !dataReady ) {
            LOG_TRACE( file_descriptor_sink_i_base, "DATA CHECK - NOT ENOUGH DATA  AVAIL/REQ:" 
                      <<   _istreams[0].nitems() << "/" << _ninput_items_required[0] );
            return -1;
        }

        // reset looping variables
        int  ritems = 0;
        int  nitems = 0;

        // reset caching vectors
        _output_items.clear();
        _input_items.clear();
        _ninput_items.clear();
        istream = istreams.begin();

        for ( int idx=0 ; istream != istreams.end(); idx++, istream++ ) {
            // check if the stream is ready
            if ( !_input_ready[idx] ) continue;
      
            // get number of items remaining
            try {
                ritems = gr_sptr->nitems_read( idx );
            } catch(...){
                // something bad has happened, we are missing an input stream
                LOG_ERROR( file_descriptor_sink_i_base, "MISSING INPUT STREAM FOR GR BLOCK, STREAM ID:" <<   istream->streamID );
                return -2;
            } 

            nitems = istream->nitems() - ritems;
            LOG_TRACE( file_descriptor_sink_i_base,  " ISTREAM: IDX:" << idx  << " ITEMS AVAIL/READ/REQ " << nitems << "/" 
                      << ritems << "/" << _ninput_items_required[idx] );
            if ( nitems >= _ninput_items_required[idx] && nitems > 0 ) {
                //remove eos checks ...if ( nitems < _ninput_items_required[idx] ) nitems=0;
                _ninput_items.push_back( nitems );
                _input_items.push_back( (const void *) (istream->read_pointer(ritems)) );
            }
        }

        nout=0;
        if ( _input_items.size() != 0 && serviceThread->threadRunning() ) {
            LOG_TRACE( file_descriptor_sink_i_base, " CALLING WORK.....N_OUT:" << noutput_items << 
                      " N_IN:" << nitems << " ISTREAMS:" << _input_items.size() << 
                      " OSTREAMS:" << _output_items.size());
            nout = gr_sptr->general_work( noutput_items, _ninput_items, _input_items, _output_items);

            // sink/analyzer patterns do not return items, so consume_each is not called in Gnu Radio BLOCK
            if ( nout == 0 ) {
                gr_sptr->consume_each(nitems);
            }

            LOG_TRACE( file_descriptor_sink_i_base, "RETURN  WORK ..... N_OUT:" << nout);
        }

        // check for stop condition from work method
        if ( nout < gr_block::WORK_DONE ) {
            LOG_WARN( file_descriptor_sink_i_base, "WORK RETURNED STOP CONDITION..." << nout );
            nout=0;
            eos = true;
        }
    }

    return nout;
 
}
Example no. 14
static void __init mx_cma_region_reserve(
    struct cma_region *regions_normal,
    struct cma_region *regions_secure)
{
    struct cma_region *reg;
    phys_addr_t paddr_last = 0xFFFFFFFF;

    for (reg = regions_normal; reg->size != 0; reg++) {
        phys_addr_t paddr;

        if (!IS_ALIGNED(reg->size, PAGE_SIZE)) {
            pr_err("S5P/CMA: size of '%s' is NOT page-aligned\n",
                   reg->name);
            reg->size = PAGE_ALIGN(reg->size);
        }

        if (reg->reserved) {
            pr_err("S5P/CMA: '%s' alread reserved\n", reg->name);
            continue;
        }

        if (reg->alignment) {
            if ((reg->alignment & ~PAGE_MASK) ||
                    (reg->alignment & ~reg->alignment)) {
                pr_err("S5P/CMA: Failed to reserve '%s': "
                       "incorrect alignment 0x%08x.\n",
                       reg->name, reg->alignment);
                continue;
            }
        } else {
            reg->alignment = PAGE_SIZE;
        }

        if (reg->start) {
            if (!memblock_is_region_reserved(reg->start, reg->size)
                    && (memblock_reserve(reg->start, reg->size) == 0))
                reg->reserved = 1;
            else
                pr_err("S5P/CMA: Failed to reserve '%s'\n",
                       reg->name);
            continue;
        }

        paddr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE,
                                       reg->size, reg->alignment);
        if (paddr != MEMBLOCK_ERROR) {
            if (memblock_reserve(paddr, reg->size)) {
                pr_err("S5P/CMA: Failed to reserve '%s'\n",
                       reg->name);
                continue;
            }
            reg->start = paddr;
            reg->reserved = 1;
            pr_info("name = %s, paddr = 0x%x, size = %d\n", reg->name, paddr, reg->size);
        } else {
            pr_err("S5P/CMA: No free space in memory for '%s'\n",
                   reg->name);
        }

        if (cma_early_region_register(reg)) {
            pr_err("S5P/CMA: Failed to register '%s'\n",
                   reg->name);
            memblock_free(reg->start, reg->size);
        } else {
            paddr_last = min(paddr, paddr_last);
        }
    }

    if (regions_secure && regions_secure->size) {
        size_t size_secure = 0;
        size_t align_secure, size_region2, aug_size, order_region2;

        for (reg = regions_secure; reg->size != 0; reg++)
            size_secure += reg->size;

        reg--;

        /* Entire secure regions will be merged into 2
         * consecutive regions. */
        align_secure = 1 <<
                       (get_order((size_secure + 1) / 2) + PAGE_SHIFT);
        /* Calculation of a subregion size */
        size_region2 = size_secure - align_secure;
        order_region2 = get_order(size_region2) + PAGE_SHIFT;
        if (order_region2 < 20)
            order_region2 = 20; /* 1MB */
        order_region2 -= 3; /* divide by 8 */
        size_region2 = ALIGN(size_region2, 1 << order_region2);

        aug_size = align_secure + size_region2 - size_secure;
        if (aug_size > 0)
            reg->size += aug_size;

        size_secure = ALIGN(size_secure, align_secure);

        if (paddr_last >= memblock.current_limit) {
            paddr_last = memblock_find_in_range(0,
                                                MEMBLOCK_ALLOC_ACCESSIBLE,
                                                size_secure, reg->alignment);
        } else {
            paddr_last -= size_secure;
            paddr_last = round_down(paddr_last, align_secure);
        }

        if (paddr_last) {
            while (memblock_reserve(paddr_last, size_secure))
                paddr_last -= align_secure;

            do {
                reg->start = paddr_last;
                reg->reserved = 1;
                paddr_last += reg->size;

                if (cma_early_region_register(reg)) {
                    memblock_free(reg->start, reg->size);
                    pr_err("S5P/CMA: "
                           "Failed to register secure region "
                           "'%s'\n", reg->name);
                } else {
                    size_secure -= reg->size;
                }
            } while (reg-- != regions_secure);

            if (size_secure > 0)
                memblock_free(paddr_last, size_secure);
        } else {
            pr_err("S5P/CMA: Failed to reserve secure regions\n");
        }
    }
}
Example no. 15
/*
 * Map a shared object into memory.  The argument is a file descriptor,
 * which must be open on the object and positioned at its beginning.
 *
 * The return value is a pointer to a newly-allocated Obj_Entry structure
 * for the shared object.  Returns NULL on failure.
 */
Obj_Entry *
_rtld_map_object(const char *path, int fd, const struct stat *sb)
{
	Obj_Entry	*obj;
	Elf_Ehdr	*ehdr;
	Elf_Phdr	*phdr;
	size_t		 phsize;
	Elf_Phdr	*phlimit;
	Elf_Phdr	*segs[2];
	int		 nsegs;
	caddr_t		 mapbase = MAP_FAILED;
	size_t		 mapsize = 0;
	size_t		 bsssize = 0;
	int		 mapflags;
	Elf_Off		 base_offset;
#ifdef MAP_ALIGNED
	Elf_Addr	 base_alignment;
#endif
	Elf_Addr	 base_vaddr;
	Elf_Addr	 base_vlimit;
	Elf_Addr	 text_vlimit;
	int		 text_flags;
	caddr_t		 base_addr;
	Elf_Off		 data_offset;
	Elf_Addr	 data_vaddr;
	Elf_Addr	 data_vlimit;
	int		 data_flags;
	caddr_t		 data_addr;
	Elf_Addr	 phdr_vaddr;
	size_t		 phdr_memsz;
	caddr_t		 gap_addr;
	size_t		 gap_size;
	int i;
#ifdef RTLD_LOADER
	Elf_Addr	 clear_vaddr;
	caddr_t		 clear_addr;
	size_t		 nclear;
#endif

	if (sb != NULL && sb->st_size < (off_t)sizeof (Elf_Ehdr)) {
		_rtld_error("%s: unrecognized file format1", path);
		return NULL;
	}

	obj = _rtld_obj_new();
	obj->path = xstrdup(path);
	obj->pathlen = strlen(path);
	if (sb != NULL) {
		obj->dev = sb->st_dev;
		obj->ino = sb->st_ino;
	}

	ehdr = mmap(NULL, _rtld_pagesz, PROT_READ, MAP_FILE | MAP_SHARED, fd,
	    (off_t)0);
	obj->ehdr = ehdr;
	if (ehdr == MAP_FAILED) {
		_rtld_error("%s: read error: %s", path, xstrerror(errno));
		goto bad;
	}
	/* Make sure the file is valid */
	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 ||
	    ehdr->e_ident[EI_CLASS] != ELFCLASS) {
		_rtld_error("%s: unrecognized file format2 [%x != %x]", path,
		    ehdr->e_ident[EI_CLASS], ELFCLASS);
		goto bad;
	}
	/* Elf_e_ident includes class */
	if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr->e_version != EV_CURRENT ||
	    ehdr->e_ident[EI_DATA] != ELFDEFNNAME(MACHDEP_ENDIANNESS)) {
		_rtld_error("%s: unsupported file version", path);
		goto bad;
	}
	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) {
		_rtld_error("%s: unsupported file type", path);
		goto bad;
	}
	switch (ehdr->e_machine) {
		ELFDEFNNAME(MACHDEP_ID_CASES)
	default:
		_rtld_error("%s: unsupported machine", path);
		goto bad;
	}

	/*
         * We rely on the program header being in the first page.  This is
         * not strictly required by the ABI specification, but it seems to
         * always be true in practice.  And it simplifies things considerably.
         */
	assert(ehdr->e_phentsize == sizeof(Elf_Phdr));
	assert(ehdr->e_phoff + ehdr->e_phnum * sizeof(Elf_Phdr) <=
	    _rtld_pagesz);

	/*
         * Scan the program header entries, and save key information.
         *
         * We rely on there being exactly two load segments, text and data,
         * in that order.
         */
	phdr = (Elf_Phdr *) ((caddr_t)ehdr + ehdr->e_phoff);
	phsize = ehdr->e_phnum * sizeof(phdr[0]);
	obj->phdr = NULL;
	phdr_vaddr = EA_UNDEF;
	phdr_memsz = 0;
	phlimit = phdr + ehdr->e_phnum;
	nsegs = 0;
	while (phdr < phlimit) {
		switch (phdr->p_type) {
		case PT_INTERP:
			obj->interp = (void *)(uintptr_t)phdr->p_vaddr;
 			dbg(("%s: PT_INTERP %p", obj->path, obj->interp));
			break;

		case PT_LOAD:
			if (nsegs < 2)
				segs[nsegs] = phdr;
			++nsegs;
			dbg(("%s: PT_LOAD %p", obj->path, phdr));
			break;

		case PT_PHDR:
			phdr_vaddr = phdr->p_vaddr;
			phdr_memsz = phdr->p_memsz;
			dbg(("%s: PT_PHDR %p phsize %zu", obj->path,
			    (void *)(uintptr_t)phdr_vaddr, phdr_memsz));
			break;
		
		case PT_DYNAMIC:
			obj->dynamic = (void *)(uintptr_t)phdr->p_vaddr;
 			dbg(("%s: PT_DYNAMIC %p", obj->path, obj->dynamic));
			break;
		}

		++phdr;
	}
	phdr = (Elf_Phdr *) ((caddr_t)ehdr + ehdr->e_phoff);
	obj->entry = (void *)(uintptr_t)ehdr->e_entry;
	if (!obj->dynamic) {
		_rtld_error("%s: not dynamically linked", path);
		goto bad;
	}
	if (nsegs != 2) {
		_rtld_error("%s: wrong number of segments (%d != 2)", path,
		    nsegs);
		goto bad;
	}

	/*
	 * Map the entire address space of the object as a file
	 * region to stake out our contiguous region and establish a
	 * base for relocation.  We use a file mapping so that
	 * the kernel will give us whatever alignment is appropriate
	 * for the platform we're running on.
	 *
	 * We map it using the text protection, map the data segment
	 * into the right place, then map an anon segment for the bss
	 * and unmap the gaps left by padding to alignment.
	 */

#ifdef MAP_ALIGNED
	base_alignment = segs[0]->p_align;
#endif
	base_offset = round_down(segs[0]->p_offset);
	base_vaddr = round_down(segs[0]->p_vaddr);
	base_vlimit = round_up(segs[1]->p_vaddr + segs[1]->p_memsz);
	text_vlimit = round_up(segs[0]->p_vaddr + segs[0]->p_memsz);
	text_flags = protflags(segs[0]->p_flags);
	data_offset = round_down(segs[1]->p_offset);
	data_vaddr = round_down(segs[1]->p_vaddr);
	data_vlimit = round_up(segs[1]->p_vaddr + segs[1]->p_filesz);
	data_flags = protflags(segs[1]->p_flags);
#ifdef RTLD_LOADER
	clear_vaddr = segs[1]->p_vaddr + segs[1]->p_filesz;
#endif

	obj->textsize = text_vlimit - base_vaddr;
	obj->vaddrbase = base_vaddr;
	obj->isdynamic = ehdr->e_type == ET_DYN;

	obj->phdr_loaded = false;
	for (i = 0; i < nsegs; i++) {
		if (phdr_vaddr != EA_UNDEF &&
		    segs[i]->p_vaddr <= phdr_vaddr &&
		    segs[i]->p_memsz >= phdr_memsz) {
			obj->phdr_loaded = true;
			break;
		}
		if (segs[i]->p_offset <= ehdr->e_phoff &&
		    segs[i]->p_memsz >= phsize) {
			phdr_vaddr = segs[i]->p_vaddr + ehdr->e_phoff;
			phdr_memsz = phsize;
			obj->phdr_loaded = true;
			break;
		}
	}
	if (obj->phdr_loaded) {
		obj->phdr = (void *)(uintptr_t)phdr_vaddr;
		obj->phsize = phdr_memsz;
	} else {
		Elf_Phdr *buf;
		buf = xmalloc(phsize);
		if (buf == NULL) {
			_rtld_error("%s: cannot allocate program header", path);
			goto bad;
		}
		memcpy(buf, phdr, phsize);
		obj->phdr = buf;
		obj->phsize = phsize;
	}
	dbg(("%s: phdr %p phsize %zu (%s)", obj->path, obj->phdr, obj->phsize,
	     obj->phdr_loaded ? "loaded" : "allocated"));

	/* Unmap header if it overlaps the first load section. */
	if (base_offset < _rtld_pagesz) {
		munmap(ehdr, _rtld_pagesz);
		obj->ehdr = MAP_FAILED;
	}

	/*
	 * Calculate log2 of the base section alignment.
	 */
	mapflags = 0;
#ifdef MAP_ALIGNED
	if (base_alignment > _rtld_pagesz) {
		unsigned int log2 = 0;
		for (; base_alignment > 1; base_alignment >>= 1)
			log2++;
		mapflags = MAP_ALIGNED(log2);
	}
Example no. 16
// Set up the initial stack page for the new child process with envid 'child'
// using the arguments array pointed to by 'argv',
// which is a null-terminated array of pointers to '\0'-terminated strings.
//
// On success, returns 0 and sets *init_esp
// to the initial stack pointer with which the child should start.
// Returns < 0 on failure.
static int
init_stack(envid_t child, const char **argv, uintptr_t *init_esp)
{
	size_t string_size;
	int argc, i, r;
	char *string_store;
	uintptr_t *argv_store;

	// Count the number of arguments (argc)
	// and the total amount of space needed for strings (string_size).
	string_size = 0;
	for (argc = 0; argv[argc] != 0; argc++)
		string_size += strlen(argv[argc]) + 1;

	// Determine where to place the strings and the argv array.
	// We set up the 'string_store' and 'argv_store' pointers to point
	// into the temporary page at UTEMP.
	// Later, we'll remap that page into the child environment
	// at (USTACKTOP - PGSIZE).
	
	// strings is the topmost thing on the stack.
	string_store = (char *) UTEMP + PGSIZE - string_size;
	
	// argv is below that.  There's one argument pointer per argument, plus
	// a null pointer.
	argv_store = (uintptr_t*) (round_down(string_store, 4) - 4 * (argc + 1));
	
	// Make sure that argv, strings, and the 2 words that hold 'argc'
	// and 'argv' themselves will all fit in a single stack page.
	if ((void*) (argv_store - 2) < (void*) UTEMP)
		return -E_NO_MEM;

	// Allocate a page at UTEMP.
	if ((r = sys_page_alloc(0, (void*) UTEMP, PTE_P|PTE_U|PTE_W)) < 0)
		return r;

	// Replace this with your code to:
	//
	//	* Initialize 'argv_store[i]' to point to argument string i,
	//	  for all 0 <= i < argc.
	//	  Also, copy the argument strings from 'argv' into the
	//	  newly-allocated stack page.
	//	  Hint: Copy the argument strings into string_store.
	//	  Hint: Make sure that argv_store uses addresses valid in the
	//	  CHILD'S environment!  The string_store variable itself
	//	  points into page UTEMP, but the child environment will have
	//	  this page mapped at USTACKTOP - PGSIZE.  Check out the
	//	  utemp_addr_to_ustack_addr function defined above.
	//
	for(i = 0; i < argc; i++){
		argv_store[i] = UTEMP2USTACK(string_store);
		strcpy(string_store,argv[i]);
		string_store += strlen(argv[i])+1;
	}
	//	* Set 'argv_store[argc]' to 0 to null-terminate the args array.
	//
	argv_store[argc] = 0;
	//	* Push two more words onto the child's stack below 'args',
	//	  containing the argc and argv parameters to be passed
	//	  to the child's umain() function.
	//	  argv should be below argc on the stack.
	//	  (Again, argv should use an address valid in the child's
	//	  environment.)
	//
	argv_store[-1] = UTEMP2USTACK(argv_store);
	argv_store[-2] = argc;
	//	* Set *init_esp to the initial stack pointer for the child,
	//	  (Again, use an address valid in the child's environment.)
	//
	// LAB 4: Your code here.
	//*init_esp = USTACKTOP;	// Change this!
	*init_esp = UTEMP2USTACK(argv_store-2);
	// After completing the stack, map it into the child's address space
	// and unmap it from ours!
	if ((r = sys_page_map(0, (void*) UTEMP, child, (void*) (USTACKTOP - PGSIZE), PTE_P | PTE_U | PTE_W)) < 0)
		goto error;
	if ((r = sys_page_unmap(0, (void*) UTEMP)) < 0)
		goto error;

	return 0;

error:
	sys_page_unmap(0, (void*) UTEMP);
	return r;
}
Example no. 17
void __init s5p_cma_region_reserve(struct cma_region *regions_normal,
				      struct cma_region *regions_secure,
				      size_t align_secure, const char *map)
{
	struct cma_region *reg;
	phys_addr_t paddr_last = 0xFFFFFFFF;

	for (reg = regions_normal; reg->size != 0; reg++) {
		phys_addr_t paddr;

		if (!IS_ALIGNED(reg->size, PAGE_SIZE)) {
			pr_debug("S5P/CMA: size of '%s' is NOT page-aligned\n",
								reg->name);
			reg->size = PAGE_ALIGN(reg->size);
		}


		if (reg->reserved) {
			pr_err("S5P/CMA: '%s' already reserved\n", reg->name);
			continue;
		}

		if (reg->alignment) {
			if ((reg->alignment & ~PAGE_MASK) ||
				(reg->alignment & ~reg->alignment)) {
				pr_err("S5P/CMA: Failed to reserve '%s': "
						"incorrect alignment 0x%08x.\n",
						reg->name, reg->alignment);
				continue;
			}
		} else {
			reg->alignment = PAGE_SIZE;
		}

		if (reg->start) {
			if (!memblock_is_region_reserved(reg->start, reg->size)
			    && (memblock_reserve(reg->start, reg->size) == 0))
				reg->reserved = 1;
			else {
				pr_err("S5P/CMA: Failed to reserve '%s'\n",
				       reg->name);
				continue;
			}

			pr_debug("S5P/CMA: "
				 "Reserved 0x%08x/0x%08x for '%s'\n",
				 reg->start, reg->size, reg->name);

			cma_region_descriptor_add(reg->name, reg->start, reg->size);

			paddr = reg->start;
		} else {
			paddr = memblock_find_in_range(0,
					MEMBLOCK_ALLOC_ACCESSIBLE,
					reg->size, reg->alignment);
		}

		if (paddr != MEMBLOCK_ERROR) {
			if (memblock_reserve(paddr, reg->size)) {
				pr_err("S5P/CMA: Failed to reserve '%s'\n",
								reg->name);
				continue;
			}

			reg->start = paddr;
			reg->reserved = 1;

			pr_info("S5P/CMA: Reserved 0x%08x/0x%08x for '%s'\n",
						reg->start, reg->size, reg->name);

			cma_region_descriptor_add(reg->name, reg->start, reg->size);
		} else {
			pr_err("S5P/CMA: No free space in memory for '%s'\n",
								reg->name);
		}

		if (cma_early_region_register(reg)) {
			pr_err("S5P/CMA: Failed to register '%s'\n",
								reg->name);
			memblock_free(reg->start, reg->size);
		} else {
			paddr_last = min(paddr, paddr_last);
		}
	}

	if (align_secure & ~align_secure) {
		pr_err("S5P/CMA: "
			"Wrong alignment requirement for secure region.\n");
	} else if (regions_secure && regions_secure->size) {
		size_t size_secure = 0;

		for (reg = regions_secure; reg->size != 0; reg++)
			size_secure += reg->size;

		reg--;

		/* Entire secure regions will be merged into 2
		 * consecutive regions. */
		if (align_secure == 0) {
			size_t size_region2;
			size_t order_region2;
			size_t aug_size;

			align_secure = 1 <<
				(get_order((size_secure + 1) / 2) + PAGE_SHIFT);
			/* Calculation of a subregion size */
			size_region2 = size_secure - align_secure;
			order_region2 = get_order(size_region2) + PAGE_SHIFT;
			if (order_region2 < 20)
				order_region2 = 20; /* 1MB */
			order_region2 -= 3; /* divide by 8 */
			size_region2 = ALIGN(size_region2, 1 << order_region2);

			aug_size = align_secure + size_region2 - size_secure;
			if (aug_size > 0) {
				reg->size += aug_size;
				size_secure += aug_size;
				pr_debug("S5P/CMA: "
					"Augmented size of '%s' by %#x B.\n",
					reg->name, aug_size);
			}
		} else
			size_secure = ALIGN(size_secure, align_secure);

		pr_info("S5P/CMA: "
			"Reserving %#x for secure region aligned by %#x.\n",
						size_secure, align_secure);

		if (paddr_last >= memblock.current_limit) {
			paddr_last = memblock_find_in_range(0,
					MEMBLOCK_ALLOC_ACCESSIBLE,
					size_secure, reg->alignment);
		} else {
			paddr_last -= size_secure;
			paddr_last = round_down(paddr_last, align_secure);
		}

		if (paddr_last) {
			pr_info("S5P/CMA: "
				"Reserved 0x%08x/0x%08x for 'secure_region'\n",
				paddr_last, size_secure);
#ifndef CONFIG_DMA_CMA
			while (memblock_reserve(paddr_last, size_secure))
				paddr_last -= align_secure;
#else
			if (!reg->start) {
				while (memblock_reserve(paddr_last,
							size_secure))
					paddr_last -= align_secure;
			}
#endif
			do {
#ifndef CONFIG_DMA_CMA
				reg->start = paddr_last;
				reg->reserved = 1;
				paddr_last += reg->size;
#else
				if (reg->start) {
					reg->reserved = 1;
#if defined(CONFIG_USE_MFC_CMA) && defined(CONFIG_MACH_M0)
					if (reg->start == 0x5C100000) {
						if (memblock_reserve(0x5C100000,
								0x700000))
							panic("memblock\n");
						if (memblock_reserve(0x5F000000,
								0x200000))
							panic("memblock\n");
					} else {
						if (memblock_reserve(reg->start,
								reg->size))
							panic("memblock\n");
					}
#else
					if (memblock_reserve(reg->start,
								reg->size))
						panic("memblock\n");

#endif
				} else {
					reg->start = paddr_last;
					reg->reserved = 1;
					paddr_last += reg->size;
				}
#endif
				pr_info("S5P/CMA: "
					"Reserved 0x%08x/0x%08x for '%s'\n",
					reg->start, reg->size, reg->name);

				cma_region_descriptor_add(reg->name, reg->start, reg->size);

				if (cma_early_region_register(reg)) {
					memblock_free(reg->start, reg->size);
					pr_err("S5P/CMA: "
					"Failed to register secure region "
					"'%s'\n", reg->name);
				} else {
					size_secure -= reg->size;
				}
			} while (reg-- != regions_secure);

			if (size_secure > 0)
				memblock_free(paddr_last, size_secure);
		} else {
			pr_err("S5P/CMA: Failed to reserve secure regions\n");
		}
	}

	if (map)
		cma_set_defaults(NULL, map);
}
Example no. 18
Box2di R2ISup(const Box2dr & aB)
{
   return Box2di(round_down(aB._p0),round_up(aB._p1));
}
Example no. 19
void
arch_detect_mem_map (mmap_info_t * mm_info, 
                     mem_map_entry_t * memory_map,
                     unsigned long mbd)
{
    struct multiboot_tag * tag;
    uint32_t n = 0;

    if (mbd & 7) {
        panic("ERROR: Unaligned multiboot info struct\n");
    }

    tag = (struct multiboot_tag*)(mbd+8);
    while (tag->type != MULTIBOOT_TAG_TYPE_MMAP) {
        tag = (struct multiboot_tag*)((multiboot_uint8_t*)tag + ((tag->size+7)&~7));
    }

    if (tag->type != MULTIBOOT_TAG_TYPE_MMAP) {
        panic("ERROR: no mmap tag found\n");
    }

    multiboot_memory_map_t * mmap;

    for (mmap=((struct multiboot_tag_mmap*)tag)->entries;
            (multiboot_uint8_t*)mmap < (multiboot_uint8_t*)tag + tag->size;
            mmap = (multiboot_memory_map_t*)((ulong_t)mmap + 
                ((struct multiboot_tag_mmap*)tag)->entry_size)) {


        if (n > MAX_MMAP_ENTRIES) {
            panic("Reached memory region limit!\n");
        }

        ulong_t start,end;

        start = round_up(mmap->addr, PAGE_SIZE_4KB);
        end   = round_down(mmap->addr + mmap->len, PAGE_SIZE_4KB);

        memory_map[n].addr = start;
        memory_map[n].len  = end-start;
        memory_map[n].type = mmap->type;

        BMM_PRINT("Memory map[%u] - [%p - %p] <%s>\n", 
                n, 
                start,
                end,
                mem_region_types[memory_map[n].type]);

        if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) {
            mm_info->usable_ram += mmap->len;
        }

        if (end > (mm_info->last_pfn << PAGE_SHIFT)) {
            mm_info->last_pfn = end >> PAGE_SHIFT;
        }

        mm_info->total_mem += end-start;

        ++n;
        ++mm_info->num_regions;
    }
}
Example no. 20
static Pt2di BoxDown(Pt2dr aP,INT  *) {return round_down(aP);}
Example no. 21
File: mmci.c Project: Lyude/linux
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* Make sure we have data to handle */
	if (!data)
		return;

	/* First check for errors */
	if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
		      host->variant->start_err |
		      MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host)) {
			mmci_dma_data_error(host);
			mmci_dma_unmap(host, data);
		}

		/*
		 * Calculate how far we are into the transfer.  Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side.  On reads, this
		 * can be as much as a FIFO-worth of data ahead.  This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_finalize(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop || host->mrq->sbc) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
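
On error only whole blocks are credited, so success is rounded down to the block size before being stored in bytes_xfered. A tiny worked sketch with made-up numbers (512-byte blocks, 8 blocks requested):

#include <stdio.h>

int main(void)
{
	unsigned int blksz = 512, blocks = 8;
	unsigned int remain = 2396;			/* made-up counter value */
	unsigned int success = blksz * blocks - remain;	/* 1700 bytes on the bus */

	/* Credit only whole blocks, as bytes_xfered is set above. */
	unsigned int xfered = success - (success % blksz);	/* 1536 */

	printf("bytes_xfered = %u\n", xfered);
	return 0;
}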
Example no. 22
// Load the 'ph' program segment of program 'id' into 'child's address
// space at the proper place.
//
// Returns 0 on success, < 0 on error.  It is also OK panic on error.
// There's no need to clean up page mappings after error; the caller will call
// sys_env_destroy(), which cleans up most mappings for us.
static int
load_segment(envid_t child, int id, struct Proghdr* ph,
	     struct Elf* elf, size_t elf_size)
{
	// Use the p_flags field in the Proghdr for each segment
	// to determine how to map the segment:
	//
	//    * If the ELF flags do not include ELF_PROG_FLAG_WRITE,
	//	then the segment contains text and read-only data.
	//	Use map_page() to map the contents of this segment
	//	directly into the child, so that multiple instances of
	//	the same program will share the same copy of the program text.
	//      Be sure to map the program text read-only in the child.
	//
	int err;
	if((ph->p_flags & ELF_PROG_FLAG_WRITE) == 0){
		uint32_t i;
		for(i = round_down(ph->p_offset,PGSIZE); i < round_up(ph->p_offset+ph->p_memsz,PGSIZE); i+= PGSIZE){
			void *dstva = (void *)(round_down(ph->p_va,PGSIZE)+i-round_down(ph->p_offset,PGSIZE));
			map_page(child, id, i, dstva, PTE_U|PTE_P);
		}
	}
	else{
	//    * If the ELF segment flags DO include ELF_PROG_FLAG_WRITE,
	//	then the segment contains read/write data and bss.
	//	As with load_elf(), such an ELF segment
	//	occupies p_memsz bytes in memory, but only the first
	//	p_filesz bytes of the segment are actually loaded
	//	from the executable file -- you must clear the rest to zero.
	//      For each page to be mapped for a read/write segment,
	//	allocate a page of memory at UTEMP in the current
	//	environment.  Then use map_page() to map that portion of
	//	the program at UTEMP2.  Then copy the data from UTEMP2 into
	//	the page at UTEMP, and/or use memset() to zero any non-loaded
	//	portions of the page.  (You can avoid calling memset(),
	//	if you like, because sys_page_alloc() returns zeroed pages
	//	already.)  Finally, insert the correct page mapping into
	//	the child, and unmap the page at UTEMP2.
	//	Look at load_elf() and fork() for inspiration.
	//

		uint32_t i;
		for(i = round_down(ph->p_offset,PGSIZE); i < round_up(ph->p_offset+ph->p_memsz,PGSIZE); i+= PGSIZE)
		{
			if ((err = sys_page_alloc(0, (void*) UTEMP, PTE_P|PTE_U|PTE_W)) < 0)
				return err;
			//deal with bss
			if((i - round_down(ph->p_offset,PGSIZE)) <= ph->p_filesz ){
				int size, offset;
				offset = i - round_down(ph->p_offset, PGSIZE);
				//modify the size
				size = (ph->p_filesz - offset) > PGSIZE ? PGSIZE : (ph->p_filesz - offset); 
				map_page(0, id, i, (void *)(UTEMP2), PTE_U|PTE_P);
				memcpy((void *)UTEMP, (void *)(UTEMP2), size);
			}	
			//memset((void *)(UTEMP2),0,PGSIZE);
			void *dstva = (void *)(round_down(ph->p_va,PGSIZE)+i-round_down(ph->p_offset,PGSIZE));
			sys_page_map(0,(void *)UTEMP,child,dstva,PTE_P|PTE_U|PTE_W);
			sys_page_unmap(0,(void*)UTEMP);
		}


	// Note: All of the segment addresses or lengths above
	// might be non-page-aligned, so you must deal with
	// these non-page-aligned values appropriately.
	// The ELF linker does, however, guarantee that no two segments
	// will overlap on the same page; and it guarantees that
	// PGOFF(ph->p_offset) == PGOFF(ph->p_va).
	}
	return 0;
	// LAB 4: Your code here.
	//panic("load_segment not completed!\n");
	//return -1;
}
Example no. 23
static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
{
	struct peri_clk_data *peri;
	struct bcm_clk_policy *policy;
	struct bcm_clk_gate *gate;
	struct bcm_clk_hyst *hyst;
	struct bcm_clk_div *div;
	struct bcm_clk_sel *sel;
	struct bcm_clk_trig *trig;
	const char *name;
	u32 range;
	u32 limit;

	BUG_ON(bcm_clk->type != bcm_clk_peri);
	peri = bcm_clk->u.peri;
	name = bcm_clk->init_data.name;
	range = bcm_clk->ccu->range;

	limit = range - sizeof(u32);
	limit = round_down(limit, sizeof(u32));

	policy = &peri->policy;
	if (policy_exists(policy)) {
		if (policy->offset > limit) {
			pr_err("%s: bad policy offset for %s (%u > %u)\n",
				__func__, name, policy->offset, limit);
			return false;
		}
	}

	gate = &peri->gate;
	hyst = &peri->hyst;
	if (gate_exists(gate)) {
		if (gate->offset > limit) {
			pr_err("%s: bad gate offset for %s (%u > %u)\n",
				__func__, name, gate->offset, limit);
			return false;
		}

		if (hyst_exists(hyst)) {
			if (hyst->offset > limit) {
				pr_err("%s: bad hysteresis offset for %s "
					"(%u > %u)\n", __func__,
					name, hyst->offset, limit);
				return false;
			}
		}
	} else if (hyst_exists(hyst)) {
		pr_err("%s: hysteresis but no gate for %s\n", __func__, name);
		return false;
	}

	div = &peri->div;
	if (divider_exists(div)) {
		if (div->u.s.offset > limit) {
			pr_err("%s: bad divider offset for %s (%u > %u)\n",
				__func__, name, div->u.s.offset, limit);
			return false;
		}
	}

	div = &peri->pre_div;
	if (divider_exists(div)) {
		if (div->u.s.offset > limit) {
			pr_err("%s: bad pre-divider offset for %s "
					"(%u > %u)\n",
				__func__, name, div->u.s.offset, limit);
			return false;
		}
	}

	sel = &peri->sel;
	if (selector_exists(sel)) {
		if (sel->offset > limit) {
			pr_err("%s: bad selector offset for %s (%u > %u)\n",
				__func__, name, sel->offset, limit);
			return false;
		}
	}

	trig = &peri->trig;
	if (trigger_exists(trig)) {
		if (trig->offset > limit) {
			pr_err("%s: bad trigger offset for %s (%u > %u)\n",
				__func__, name, trig->offset, limit);
			return false;
		}
	}

	trig = &peri->pre_trig;
	if (trigger_exists(trig)) {
		if (trig->offset > limit) {
			pr_err("%s: bad pre-trigger offset for %s (%u > %u)\n",
				__func__, name, trig->offset, limit);
			return false;
		}
	}

	return true;
}
Example no. 24
static int decompress(struct nx842_crypto_ctx *ctx,
		      struct nx842_crypto_param *p,
		      struct nx842_crypto_header_group *g,
		      struct nx842_constraints *c,
		      u16 ignore)
{
	unsigned int slen = be32_to_cpu(g->compressed_length);
	unsigned int required_len = be32_to_cpu(g->uncompressed_length);
	unsigned int dlen = p->oremain, tmplen;
	unsigned int adj_slen = slen;
	u8 *src = p->in, *dst = p->out;
	u16 padding = be16_to_cpu(g->padding);
	int ret, spadding = 0;
	ktime_t timeout;

	if (!slen || !required_len)
		return -EINVAL;

	if (p->iremain <= 0 || padding + slen > p->iremain)
		return -EOVERFLOW;

	if (p->oremain <= 0 || required_len - ignore > p->oremain)
		return -ENOSPC;

	src += padding;

	if (slen % c->multiple)
		adj_slen = round_up(slen, c->multiple);
	if (slen < c->minimum)
		adj_slen = c->minimum;
	if (slen > c->maximum)
		goto usesw;
	if (slen < adj_slen || (u64)src % c->alignment) {
		/* we can append padding bytes because the 842 format defines
		 * an "end" template (see lib/842/842_decompress.c) and will
		 * ignore any bytes following it.
		 */
		if (slen < adj_slen)
			memset(ctx->sbounce + slen, 0, adj_slen - slen);
		memcpy(ctx->sbounce, src, slen);
		src = ctx->sbounce;
		spadding = adj_slen - slen;
		slen = adj_slen;
		pr_debug("using decomp sbounce buffer, len %x\n", slen);
	}

	if (dlen % c->multiple)
		dlen = round_down(dlen, c->multiple);
	if (dlen < required_len || (u64)dst % c->alignment) {
		dst = ctx->dbounce;
		dlen = min(required_len, BOUNCE_BUFFER_SIZE);
		pr_debug("using decomp dbounce buffer, len %x\n", dlen);
	}
	if (dlen < c->minimum)
		goto usesw;
	if (dlen > c->maximum)
		dlen = c->maximum;

	tmplen = dlen;
	timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
	do {
		dlen = tmplen; /* reset dlen, if we're retrying */
		ret = ctx->driver->decompress(src, slen, dst, &dlen, ctx->wmem);
	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
	if (ret) {
usesw:
		/* reset everything, sw doesn't have constraints */
		src = p->in + padding;
		slen = be32_to_cpu(g->compressed_length);
		spadding = 0;
		dst = p->out;
		dlen = p->oremain;
		if (dlen < required_len) { /* have ignore bytes */
			dst = ctx->dbounce;
			dlen = BOUNCE_BUFFER_SIZE;
		}
		pr_info_ratelimited("using software 842 decompression\n");
		ret = sw842_decompress(src, slen, dst, &dlen);
	}
	if (ret)
		return ret;

	slen -= spadding;

	dlen -= ignore;
	if (ignore)
		pr_debug("ignoring last %x bytes\n", ignore);

	if (dst == ctx->dbounce)
		memcpy(p->out, dst, dlen);

	pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
		 slen, padding, dlen, ignore);

	return update_param(p, slen + padding, dlen);
}
Example no. 25
static void compute_kernel_transform(const struct kernel_transform_context context[restrict static 1],
	size_t input_channel,       size_t output_channels_subblock_start,
	size_t input_channel_range, size_t output_channels_subblock_size)
{
	const size_t tuple_elements           = context->tuple_elements;
	const size_t output_channels          = context->output_channels;
	const size_t input_channels           = context->input_channels;
	const size_t input_channels_block_max = context->input_channels_block_max;
	const struct nnp_size kernel_size     = context->kernel_size;

	const float (*kernel)[input_channels][kernel_size.width * kernel_size.height] =
		(const float(*)[input_channels][kernel_size.width * kernel_size.height]) context->kernel;
	float* kernel_transform             = context->kernel_transform;
	nnp_transform_2d transform_function = context->transform_function;

	const size_t input_channels_block_start = round_down(input_channel, input_channels_block_max);
	const size_t input_channels_block_size = min(input_channels - input_channels_block_start, input_channels_block_max);
	const size_t input_channels_block_offset = input_channel - input_channels_block_start;

	for (size_t output_channels_subblock_offset = 0; output_channels_subblock_offset < output_channels_subblock_size; output_channels_subblock_offset += 1) {
		const size_t output_channel = output_channels_subblock_start + output_channels_subblock_offset;
		transform_function(
			kernel[output_channel][input_channel],
			kernel_transform +
				(input_channels_block_start * output_channels + output_channels_subblock_start * input_channels_block_size + input_channels_block_offset * output_channels_subblock_size + output_channels_subblock_offset) * tuple_elements,
			kernel_size.width,
			output_channels * input_channels * tuple_elements * sizeof(float),
			kernel_size.height, kernel_size.width, 0, 0);
	}
}
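
round_down(input_channel, input_channels_block_max) locates the start of the channel block containing input_channel; the block size and the offset within the block then follow from it. A small sketch of that tiling arithmetic with arbitrary sizes (plain integer division stands in for round_down):

#include <stdio.h>

int main(void)
{
	/* Hypothetical channel tiling: blocks of at most 8 channels. */
	int input_channels = 21, block_max = 8, input_channel = 13;

	int block_start = (input_channel / block_max) * block_max;	/* 8 */
	int block_size = input_channels - block_start;			/* 13 */
	int block_offset = input_channel - block_start;			/* 5 */

	if (block_size > block_max)
		block_size = block_max;					/* 8 */

	printf("block [%d, %d) offset %d\n",
	       block_start, block_start + block_size, block_offset);
	return 0;
}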
Example no. 26
inline static int round_up(int v, int m)
{
    return round_down(v + m - 1, m);
}
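
This works because adding m - 1 before truncating pushes any value that is not already a multiple of m up to the next one, while exact multiples are unchanged. A quick sketch under the assumption that the paired round_down truncates toward zero (v >= 0, m > 0); round_down_i/round_up_i are local names for illustration:

#include <stdio.h>

/* One plausible round_down this helper could pair with: truncate to the
 * nearest lower multiple of m (assumes v >= 0 and m > 0). */
static int round_down_i(int v, int m) { return (v / m) * m; }
static int round_up_i(int v, int m)   { return round_down_i(v + m - 1, m); }

int main(void)
{
	printf("%d %d %d\n",
	       round_up_i(7, 4),	/* 8 */
	       round_up_i(8, 4),	/* 8 */
	       round_up_i(0, 4));	/* 0 */
	return 0;
}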
Example no. 27
void bench_graphe_grille(Pt2di sz,TGr41 & gr)
{
       TPtSubGr41 GrAll;
       TPart41    Part;

       all_face_trigo
       (
            gr,
            (TSubGr41 &) GrAll,
            Part
       );

       ElFifo <Pt2dr> Fp2(2,true);

       BENCH_ASSERT(Part.nb() ==  ((sz.x-1)*(sz.y-1) +1));
       INT nb4 =0;
       INT perim_th = (sz.x-1 + sz.y-1) * 2;

       for (INT f=0; f< Part.nb() ; f++)
       {
           INT nbf = Part[f].nb();
           BENCH_ASSERT((nbf == 4) || (nbf==perim_th));

           copy_on(Fp2,Part[f],PtArc41);
           REAL surf = surf_or_poly(Fp2);
           Pt2dr cdg =  barrycentre(Fp2);

           if (nbf == 4)
           {
               nb4 ++;
               BENCH_ASSERT (std::abs(std::abs(surf)-1.0) < epsilon);
               REAL x = cdg.x - round_down(cdg.x);
               REAL y = cdg.y - round_down(cdg.y);
               BENCH_ASSERT
               (
                       (std::abs(x-0.5) < epsilon)
                   &&  (std::abs(y-0.5) < epsilon)
               );
           }
           else
           {
                BENCH_ASSERT (std::abs(surf-(sz.x-1)*(sz.y-1))< epsilon);
                BENCH_ASSERT(euclid(cdg,(Pt2dr(sz)-Pt2dr(1,1))/2.0) < epsilon);
           }
       }
       BENCH_ASSERT(Part.nb() == nb4+1);

       TSubGr41_xy2 Gr_xy2;
       all_face_trigo
       (
            gr,
            (TSubGr41 &) Gr_xy2,
            Part
       );
       BENCH_ASSERT(Part.nb() ==  (((sz.x-1)/2)*((sz.y-1)/2) +1));


       TSubGr41_xeg    Gr_xeq;
       all_face_trigo
       (
            gr,
            (TSubGr41 &) Gr_xeq,
            Part
       );
       BENCH_ASSERT(Part.nb() ==  sz.x);


       TSubGr41_xeg_y0    Gr_xeq_y0;
       all_face_trigo
       (
            gr,
            (TSubGr41 &) Gr_xeq_y0,
            Part
       );
       BENCH_ASSERT(Part.nb() ==  1);

}
Example no. 28
static int compress(struct nx842_crypto_ctx *ctx,
		    struct nx842_crypto_param *p,
		    struct nx842_crypto_header_group *g,
		    struct nx842_constraints *c,
		    u16 *ignore,
		    unsigned int hdrsize)
{
	unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
	unsigned int adj_slen = slen;
	u8 *src = p->in, *dst = p->out;
	int ret, dskip = 0;
	ktime_t timeout;

	if (p->iremain == 0)
		return -EOVERFLOW;

	if (p->oremain == 0 || hdrsize + c->minimum > dlen)
		return -ENOSPC;

	if (slen % c->multiple)
		adj_slen = round_up(slen, c->multiple);
	if (slen < c->minimum)
		adj_slen = c->minimum;
	if (slen > c->maximum)
		adj_slen = slen = c->maximum;
	if (adj_slen > slen || (u64)src % c->alignment) {
		adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
		slen = min(slen, BOUNCE_BUFFER_SIZE);
		if (adj_slen > slen)
			memset(ctx->sbounce + slen, 0, adj_slen - slen);
		memcpy(ctx->sbounce, src, slen);
		src = ctx->sbounce;
		slen = adj_slen;
		pr_debug("using comp sbounce buffer, len %x\n", slen);
	}

	dst += hdrsize;
	dlen -= hdrsize;

	if ((u64)dst % c->alignment) {
		dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
		dst += dskip;
		dlen -= dskip;
	}
	if (dlen % c->multiple)
		dlen = round_down(dlen, c->multiple);
	if (dlen < c->minimum) {
nospc:
		dst = ctx->dbounce;
		dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
		dlen = round_down(dlen, c->multiple);
		dskip = 0;
		pr_debug("using comp dbounce buffer, len %x\n", dlen);
	}
	if (dlen > c->maximum)
		dlen = c->maximum;

	tmplen = dlen;
	timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
	do {
		dlen = tmplen; /* reset dlen, if we're retrying */
		ret = ctx->driver->compress(src, slen, dst, &dlen, ctx->wmem);
		/* possibly we should reduce the slen here, instead of
		 * retrying with the dbounce buffer?
		 */
		if (ret == -ENOSPC && dst != ctx->dbounce)
			goto nospc;
	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
	if (ret)
		return ret;

	dskip += hdrsize;

	if (dst == ctx->dbounce)
		memcpy(p->out + dskip, dst, dlen);

	g->padding = cpu_to_be16(dskip);
	g->compressed_length = cpu_to_be32(dlen);
	g->uncompressed_length = cpu_to_be32(slen);

	if (p->iremain < slen) {
		*ignore = slen - p->iremain;
		slen = p->iremain;
	}

	pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
		 slen, *ignore, dlen, dskip);

	return update_param(p, slen, dskip + dlen);
}
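Esempio n. 29
0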
template <  typename IN_PORT_TYPE, typename OUT_PORT_TYPE > int randomizer_base::_forecastAndProcess( bool &eos, typename  std::vector< gr_istream< IN_PORT_TYPE > > &istreams ,
                                 typename  std::vector< gr_ostream< OUT_PORT_TYPE > > &ostreams  )
{
    typedef typename std::vector< gr_istream< IN_PORT_TYPE > >   _IStreamList;
    typedef typename std::vector< gr_ostream< OUT_PORT_TYPE > >  _OStreamList;

    typename _OStreamList::iterator ostream;
    typename _IStreamList::iterator istream = istreams.begin();
    int nout = 0;
    bool dataReady = false;
    if ( !eos ) {
        uint64_t max_items_avail = 0;
        for ( int idx=0 ; istream != istreams.end() && serviceThread->threadRunning() ; idx++, istream++ ) {
            LOG_TRACE( randomizer_base, "GET MAX ITEMS: STREAM:"<< idx << " NITEMS/SCALARS:" << 
                       istream->nitems() << "/" << istream->_data.size() );
            max_items_avail = std::max( istream->nitems(), max_items_avail );
        }

        if ( max_items_avail == 0  ) {
            LOG_TRACE( randomizer_base, "DATA CHECK - MAX ITEMS  NOUTPUT/MAX_ITEMS:" <<   noutput_items << "/" << max_items_avail);
            return -1;
        }

        //
        // calc number of output elements based on input items available
        //
        noutput_items = 0;
        if ( !gr_sptr->fixed_rate() )  {
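            // variable-rate block: scale the available input by relative_rate and
            // round down to a whole number of output_multiple items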
            noutput_items = round_down((int32_t) (max_items_avail * gr_sptr->relative_rate()), gr_sptr->output_multiple());
            LOG_TRACE( randomizer_base, " VARIABLE FORECAST NOUTPUT == " << noutput_items );
        } else {
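            // fixed-rate block: take the smallest positive per-stream output estimate,
            // rounded up to output_multiple when one is set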
            istream = istreams.begin();
            for ( int i=0; istream != istreams.end(); i++, istream++ ) {
                int t_noutput_items = gr_sptr->fixed_rate_ninput_to_noutput( istream->nitems() );
                if ( gr_sptr->output_multiple_set() ) {
                    t_noutput_items = round_up(t_noutput_items, gr_sptr->output_multiple());
                }
                if ( t_noutput_items > 0 ) {
                    if ( noutput_items == 0 ) {
                        noutput_items = t_noutput_items;
                    }
                    if ( t_noutput_items <= noutput_items ) {
                        noutput_items = t_noutput_items;
                    }
                }
            }
            LOG_TRACE( randomizer_base,  " FIXED FORECAST NOUTPUT/output_multiple == " << 
                        noutput_items  << "/" << gr_sptr->output_multiple());
        }

        //
        // ask the block how much input they need to produce noutput_items...
        // if enough data is available to process then set the dataReady flag
        //
        int32_t  outMultiple = gr_sptr->output_multiple();
        while ( !dataReady && noutput_items >= outMultiple  ) {
            //
            // ask the block how much input they need to produce noutput_items...
            //
            gr_sptr->forecast(noutput_items, _ninput_items_required);

            LOG_TRACE( randomizer_base, "--> FORECAST IN/OUT " << _ninput_items_required[0]  << "/" << noutput_items  );

            istream = istreams.begin();
            uint32_t dr_cnt=0;
            for ( int idx=0 ; noutput_items > 0 && istream != istreams.end(); idx++, istream++ ) {
                // check if buffer has enough elements
                _input_ready[idx] = false;
                if ( istream->nitems() >= (uint64_t)_ninput_items_required[idx] ) {
                    _input_ready[idx] = true;
                    dr_cnt++;
                }
                LOG_TRACE( randomizer_base, "ISTREAM DATACHECK NELMS/NITEMS/REQ/READY:" <<   istream->nelems() << 
                          "/" << istream->nitems() << "/" << _ninput_items_required[idx] << "/" << _input_ready[idx]);
            }
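            // some stream still lacks the forecast input: shrink the requested output
            // and run the forecast again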
    
            if ( dr_cnt < istreams.size() ) {
                if ( outMultiple > 1 ) {
                    noutput_items -= outMultiple;
                } else {
                    noutput_items /= 2;
                }
            } else {
                dataReady = true;
            }
            LOG_TRACE( randomizer_base, " TRIM FORECAST NOUTPUT/READY " << noutput_items << "/" << dataReady );
        }

        // check if data is ready...
        if ( !dataReady ) {
            LOG_TRACE( randomizer_base, "DATA CHECK - NOT ENOUGH DATA  AVAIL/REQ:" <<   _istreams[0].nitems() << 
                      "/" << _ninput_items_required[0] );
            return -1;
        }

        // reset looping variables
        int  ritems = 0;
        int  nitems = 0;

        // reset caching vectors
        _output_items.clear();
        _input_items.clear();
        _ninput_items.clear();
        istream = istreams.begin();

        for ( int idx=0 ; istream != istreams.end(); idx++, istream++ ) {
            // check if the stream is ready
            if ( !_input_ready[idx] ) {
                continue;
            }
            // get number of items remaining
            try {
                ritems = gr_sptr->nitems_read( idx );
            } catch(...){
                // something bad has happened, we are missing an input stream
                LOG_ERROR( randomizer_base, "MISSING INPUT STREAM FOR GR BLOCK, STREAM ID:" <<   istream->streamID );
                return -2;
            } 
    
            nitems = istream->nitems() - ritems;
            LOG_TRACE( randomizer_base,  " ISTREAM: IDX:" << idx  << " ITEMS AVAIL/READ/REQ " << nitems << "/" 
                       << ritems << "/" << _ninput_items_required[idx] );
            if ( nitems >= _ninput_items_required[idx] && nitems > 0 ) {
                //remove eos checks ...if ( nitems < _ninput_items_required[idx] ) nitems=0;
                _ninput_items.push_back( nitems );
                _input_items.push_back( (const void *) (istream->read_pointer(ritems)) );
            }
        }

        //
        // setup output buffer vector based on noutput..
        //
        ostream = ostreams.begin();
        for( ; ostream != ostreams.end(); ostream++ ) {
            ostream->resize(noutput_items);
            _output_items.push_back((void*)(ostream->write_pointer()) );
        }

        nout=0;
        if ( _input_items.size() != 0 && serviceThread->threadRunning() ) {
            LOG_TRACE( randomizer_base, " CALLING WORK.....N_OUT:" << noutput_items << " N_IN:" << nitems 
                      << " ISTREAMS:" << _input_items.size() << " OSTREAMS:" << _output_items.size());
            nout = gr_sptr->general_work( noutput_items, _ninput_items, _input_items, _output_items);
            LOG_TRACE( randomizer_base, "RETURN  WORK ..... N_OUT:" << nout);
        }

        // check for stop condition from work method
        if ( nout < gr_block::WORK_DONE ) {
            LOG_WARN( randomizer_base, "WORK RETURNED STOP CONDITION..." << nout );
            nout=0;
            eos = true;
        }
    }

    if (nout != 0 or eos ) {
        noutput_items = nout;
        LOG_TRACE( randomizer_base, " WORK RETURNED: NOUT : " << nout << " EOS:" << eos);
        ostream = ostreams.begin();
        typename IN_PORT_TYPE::dataTransfer *pkt=NULL;
        for ( int idx=0 ; ostream != ostreams.end(); idx++, ostream++ ) {

            pkt=NULL;
            int inputIdx = idx;
            if ( (size_t)(inputIdx) >= istreams.size() ) {
                for ( inputIdx= istreams.size()-1; inputIdx > -1; inputIdx--) {
                    if ( istreams[inputIdx].pkt != NULL ) {
                        pkt = istreams[inputIdx].pkt;
                        break;
                    }
                }
            } else {
                pkt = istreams[inputIdx].pkt;
            }

            LOG_TRACE( randomizer_base,  "PUSHING DATA   ITEMS/STREAM_ID " << ostream->nitems() << "/" << ostream->streamID );    
            if ( _maintainTimeStamp ) {

                // set time stamp for output samples based on input time stamp
                if ( ostream->nelems() == 0 )  {
#ifdef TEST_TIME_STAMP
      LOG_DEBUG( randomizer_base, "SEED - TS SRI:  xdelta:" << std::setprecision(12) << ostream->sri.xdelta );
      LOG_DEBUG( randomizer_base, "OSTREAM WRITE:   maint:" << _maintainTimeStamp );
      LOG_DEBUG( randomizer_base, "                  mode:" <<  ostream->tstamp.tcmode );
      LOG_DEBUG( randomizer_base, "                status:" <<  ostream->tstamp.tcstatus );
      LOG_DEBUG( randomizer_base, "                offset:" <<  ostream->tstamp.toff );
      LOG_DEBUG( randomizer_base, "                 whole:" <<  std::setprecision(10) << ostream->tstamp.twsec );
      LOG_DEBUG( randomizer_base, "SEED - TS         frac:" <<  std::setprecision(12) << ostream->tstamp.tfsec );
#endif
                    ostream->setTimeStamp( pkt->T, _maintainTimeStamp );
                }

                // write out samples, and set next time stamp based on xdelta and  noutput_items
                ostream->write ( noutput_items, eos );
            } else {
// use incoming packet's time stamp to forward
                if ( pkt ) {
#ifdef TEST_TIME_STAMP
      LOG_DEBUG( randomizer_base, "OSTREAM  SRI:  items/xdelta:" << noutput_items << "/" << std::setprecision(12) << ostream->sri.xdelta );
      LOG_DEBUG( randomizer_base, "PKT - TS         maint:" << _maintainTimeStamp );
      LOG_DEBUG( randomizer_base, "                  mode:" <<  pkt->T.tcmode );
      LOG_DEBUG( randomizer_base, "                status:" <<  pkt->T.tcstatus );
      LOG_DEBUG( randomizer_base, "                offset:" <<  pkt->T.toff );
      LOG_DEBUG( randomizer_base, "                 whole:" <<  std::setprecision(10) << pkt->T.twsec );
      LOG_DEBUG( randomizer_base, "PKT - TS          frac:" <<  std::setprecision(12) << pkt->T.tfsec );
#endif
                    ostream->write( noutput_items, eos, pkt->T  );
                } else {
#ifdef TEST_TIME_STAMP
      LOG_DEBUG( randomizer_base, "OSTREAM  SRI:  items/xdelta:" << noutput_items << "/" << std::setprecision(12) << ostream->sri.xdelta );
      LOG_DEBUG( randomizer_base, "OSTREAM TOD      maint:" << _maintainTimeStamp );
      LOG_DEBUG( randomizer_base, "                  mode:" <<  ostream->tstamp.tcmode );
      LOG_DEBUG( randomizer_base, "                status:" <<  ostream->tstamp.tcstatus );
      LOG_DEBUG( randomizer_base, "                offset:" <<  ostream->tstamp.toff );
      LOG_DEBUG( randomizer_base, "                 whole:" <<  std::setprecision(10) << ostream->tstamp.twsec );
      LOG_DEBUG( randomizer_base, "OSTREAM TOD       frac:" <<  std::setprecision(12) << ostream->tstamp.tfsec );
#endif
                    // use time of day as time stamp
                    ostream->write( noutput_items, eos,  _maintainTimeStamp );
                }
            }

        } // for ostreams
    }

    return nout;     
}
Esempio n. 30
0
static void mxr_graph_fix_geometry(struct mxr_layer *layer,
	enum mxr_geometry_stage stage, unsigned long flags)
{
	struct mxr_geometry *geo = &layer->geo;
	struct mxr_crop *src = &geo->src;
	struct mxr_crop *dst = &geo->dst;
	unsigned int x_center, y_center;

	switch (stage) {

	case MXR_GEOMETRY_SINK: /* nothing to be fixed here */
		flags = 0;
		/* fall through */

	case MXR_GEOMETRY_COMPOSE:
		/* remember center of the area */
		x_center = dst->x_offset + dst->width / 2;
		y_center = dst->y_offset + dst->height / 2;
		/* round up/down to 2 multiple depending on flags */
		if (flags & V4L2_SEL_FLAG_LE) {
			dst->width = round_down(dst->width, 2);
			dst->height = round_down(dst->height, 2);
		} else {
			dst->width = round_up(dst->width, 2);
			dst->height = round_up(dst->height, 2);
		}
		/* assure that compose rect is inside display area */
		dst->width = min(dst->width, dst->full_width);
		dst->height = min(dst->height, dst->full_height);

		/* ensure that compose is reachable using 2x scaling */
		dst->width = min(dst->width, 2 * src->full_width);
		dst->height = min(dst->height, 2 * src->full_height);

		/* setup offsets */
		dst->x_offset = do_center(x_center, dst->width,
			dst->full_width, flags);
		dst->y_offset = do_center(y_center, dst->height,
			dst->full_height, flags);
		flags = 0;
		/* fall through */

	case MXR_GEOMETRY_CROP:
		/* remember center of the area */
		x_center = src->x_offset + src->width / 2;
		y_center = src->y_offset + src->height / 2;
		/* ensure that cropping area lies inside the buffer */
		if (src->full_width < dst->width)
			src->width = dst->width / 2;
		else
			src->width = closest(src->width, dst->width / 2,
				dst->width, flags);

		if (src->width == dst->width)
			geo->x_ratio = 0;
		else
			geo->x_ratio = 1;

		if (src->full_height < dst->height)
			src->height = dst->height / 2;
		else
			src->height = closest(src->height, dst->height / 2,
				dst->height, flags);

		if (src->height == dst->height)
			geo->y_ratio = 0;
		else
			geo->y_ratio = 1;

		/* setup offsets */
		src->x_offset = do_center(x_center, src->width,
			src->full_width, flags);
		src->y_offset = do_center(y_center, src->height,
			src->full_height, flags);
		flags = 0;
		/* fall through */
	case MXR_GEOMETRY_SOURCE:
		src->full_width = clamp_val(src->full_width,
			src->width + src->x_offset, 32767);
		src->full_height = clamp_val(src->full_height,
			src->height + src->y_offset, 2047);
	}
}