Example #1
void ft_end_tree(struct ft_cxt *cxt)
{
	struct boot_param_header *bph = cxt->bph;
	char *p, *oldstr, *str, *endp;
	unsigned long ssize;
	int adj;

	if (!cxt->isordered)
		return;		/* we haven't touched anything */

	/* adjust string offsets */
	oldstr = cxt->rgn[FT_STRINGS].start;
	adj = cxt->str_anchor - oldstr;
	if (adj)
		adjust_string_offsets(cxt, adj);

	/* make strings end on 8-byte boundary */
	ssize = cxt->rgn[FT_STRINGS].size;
	endp = (char *)_ALIGN((unsigned long)cxt->rgn[FT_STRUCT].start
			+ cxt->rgn[FT_STRUCT].size + ssize, 8);
	str = endp - ssize;

	/* move strings down to end of structs */
	memmove(str, oldstr, ssize);
	cxt->str_anchor = str;
	cxt->rgn[FT_STRINGS].start = str;

	/* fill in header fields */
	p = (char *)bph;
	bph->totalsize = cpu_to_be32(endp - p);
	bph->off_mem_rsvmap = cpu_to_be32(cxt->rgn[FT_RSVMAP].start - p);
	bph->off_dt_struct = cpu_to_be32(cxt->rgn[FT_STRUCT].start - p);
	bph->off_dt_strings = cpu_to_be32(cxt->rgn[FT_STRINGS].start - p);
	bph->dt_strings_size = cpu_to_be32(ssize);
}
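Note: the kernel and boot-wrapper examples (such as #1 and #2 here) use the two-argument form _ALIGN(x, a), which rounds x up to the next multiple of the power-of-two boundary a. A minimal sketch of that round-up idiom, shown as an assumption about the underlying macro (its real definition lives in the platform's arch headers, not in these snippets):

#include <stdio.h>

/* Hypothetical stand-in for the two-argument round-up form used above;
 * the real definition comes from the platform headers. 'a' is assumed
 * to be a power of two. */
#define _ALIGN(x, a)	(((unsigned long)(x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

int main(void)
{
	/* rounds 0x1005 up to the next 8-byte boundary: prints 0x1008 */
	printf("%#lx\n", _ALIGN(0x1005UL, 8));
	return 0;
}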
Example #2
void mschunks_alloc(unsigned long num_chunks)
{
	klimit = _ALIGN(klimit, sizeof(u32));
	mschunks_map.mapping = (u32 *)klimit;
	klimit += num_chunks * sizeof(u32);
	mschunks_map.num_chunks = num_chunks;
}
Example #3
static int
parse_result(struct servent *serv, char *buffer, size_t bufsize,
    char *resultbuf, size_t resultbuflen, int *errnop)
{
	char **aliases;
	int aliases_size;

	if (bufsize <= resultbuflen + _ALIGNBYTES + sizeof(char *)) {
		*errnop = ERANGE;
		return (NS_RETURN);
	}
	aliases = (char **)_ALIGN(&buffer[resultbuflen + 1]);
	aliases_size = (buffer + bufsize - (char *)aliases) / sizeof(char *);
	if (aliases_size < 1) {
		*errnop = ERANGE;
		return (NS_RETURN);
	}

	memcpy(buffer, resultbuf, resultbuflen);
	buffer[resultbuflen] = '\0';

	if (servent_unpack(buffer, serv, aliases, aliases_size, errnop) != 0)
		return ((*errnop == 0) ? NS_NOTFOUND : NS_RETURN);
	return (NS_SUCCESS);
}
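Note: Example #3 (and #25 below) uses the one-argument BSD-style form _ALIGN(p) together with _ALIGNBYTES, which rounds a pointer or length up to the platform's worst-case alignment. A minimal sketch under that assumption; the real definitions are modelled on, and come from, the system's <sys/param.h>:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins patterned after BSD <sys/param.h>; the actual
 * values are platform-specific. */
#define _ALIGNBYTES	(sizeof(long) - 1)
#define _ALIGN(p)	(((uintptr_t)(p) + _ALIGNBYTES) & ~(uintptr_t)_ALIGNBYTES)

int main(void)
{
	char buffer[64];

	/* as in Example #3: place a pointer array at the first suitably
	 * aligned address past some packed string data */
	char **aliases = (char **)_ALIGN(&buffer[13]);
	printf("%p -> %p\n", (void *)&buffer[13], (void *)aliases);
	return 0;
}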
Example #4
/* Loads additional segments when a panic kernel is being loaded:
 * one segment for the backup region, another for storing the ELF headers
 * for the crash memory image.
 */
int load_crashdump_segments(struct kexec_info *info, char* mod_cmdline,
				unsigned long UNUSED(max_addr),
				unsigned long UNUSED(min_base))
{
	void *tmp;
	unsigned long sz, elfcorehdr;
	int nr_ranges, align = 1024;
	struct memory_range *mem_range;
	crash_create_elf_headers_func crash_create = crash_create_elf32_headers;
	struct crash_elf_info *elf_info = &elf_info32;
	unsigned long start_offset = 0x80000000UL;

#ifdef __mips64
	if (arch_options.core_header_type == CORE_TYPE_ELF64) {
		elf_info = &elf_info64;
		crash_create = crash_create_elf64_headers;
		start_offset = 0xffffffff80000000UL;
	}
#endif

	if (get_kernel_paddr(elf_info))
		return -1;

	if (get_kernel_vaddr_and_size(elf_info, start_offset))
		return -1;

	if (get_crash_memory_ranges(&mem_range, &nr_ranges) < 0)
		return -1;

	info->backup_src_start = BACKUP_SRC_START;
	info->backup_src_size = BACKUP_SRC_SIZE;
	/* Create a backup region segment to store backup data */
	sz = _ALIGN(BACKUP_SRC_SIZE, align);
	tmp = xmalloc(sz);
	memset(tmp, 0, sz);
	info->backup_start = add_buffer(info, tmp, sz, sz, align,
				crash_reserved_mem.start,
				crash_reserved_mem.end, -1);

	if (crash_create(info, elf_info, crash_memory_range, nr_ranges,
			 &tmp, &sz, ELF_CORE_HEADER_ALIGN) < 0)
		return -1;
	elfcorehdr = add_buffer(info, tmp, sz, sz, align,
		crash_reserved_mem.start,
		crash_reserved_mem.end, -1);

	/*
	 * backup segment is after elfcorehdr, so use elfcorehdr as top of
	 * kernel's available memory
	 */
	cmdline_add_mem(mod_cmdline, crash_reserved_mem.start,
		elfcorehdr - crash_reserved_mem.start);
	cmdline_add_elfcorehdr(mod_cmdline, elfcorehdr);

	dbgprintf("CRASH MEMORY RANGES:\n");
	dbgprintf("%016Lx-%016Lx\n", crash_reserved_mem.start,
			crash_reserved_mem.end);
	return 0;
}
Example #5
void zoin_hash(void *state, const void *input, uint32_t height)
{
	uint32_t _ALIGN(256) hash[16];

	LYRA2Z(hash, 32, input, 80, input, 80, 2, 330, 256);

	memcpy(state, hash, 32);
}
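Note: in the miner examples (such as #5 above and the later hash/scanhash functions), _ALIGN(x) is not a rounding macro but a variable alignment attribute. A likely definition, shown here as an assumption (cpuminer-style sources usually keep it in a project header; MSVC builds would use __declspec(align(x)) instead):

#include <stdio.h>
#include <stdint.h>

/* Assumed definition: a thin wrapper around the compiler's
 * alignment attribute. */
#define _ALIGN(x)	__attribute__((aligned(x)))

int main(void)
{
	uint32_t _ALIGN(64) hash[8];	/* buffer aligned to a 64-byte boundary */

	printf("hash is at %p\n", (void *)hash);
	return 0;
}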
Example #6
void jha_hash(void *output, const void *input)
{
	uint8_t _ALIGN(128) hash[64];

#ifdef NO_AES_NI
	sph_groestl512_context ctx_groestl;
#else
	hashState_groestl ctx_groestl;
#endif
	sph_blake512_context ctx_blake;
	sph_jh512_context ctx_jh;
	sph_keccak512_context ctx_keccak;
	sph_skein512_context ctx_skein;

	memcpy(&ctx_keccak, &jha_kec_mid, sizeof jha_kec_mid);
	sph_keccak512(&ctx_keccak, input + 64, 16);
	sph_keccak512_close(&ctx_keccak, hash);

	// Heavy & Light Pair Loop
	for (int round = 0; round < 3; round++)
	{
		if (hash[0] & 0x01)
		{
#ifdef NO_AES_NI
			sph_groestl512_init(&ctx_groestl);
			sph_groestl512(&ctx_groestl, hash, 64);
			sph_groestl512_close(&ctx_groestl, hash);
#else
			init_groestl(&ctx_groestl, 64);
			update_and_final_groestl(&ctx_groestl, (char*)hash,
						 (char*)hash, 512);
#endif
		}
		else
		{
			sph_skein512_init(&ctx_skein);
			sph_skein512(&ctx_skein, hash, 64);
			sph_skein512_close(&ctx_skein, hash);
		}

		if (hash[0] & 0x01)
		{
			sph_blake512_init(&ctx_blake);
			sph_blake512(&ctx_blake, hash, 64);
			sph_blake512_close(&ctx_blake, hash);
		}
		else
		{
			sph_jh512_init(&ctx_jh);
			sph_jh512(&ctx_jh, hash, 64);
			sph_jh512_close(&ctx_jh, hash);
		}
	}

	memcpy(output, hash, 32);
}
Example #7
int scanhash_lyra2z(int thr_id, struct work *work, uint32_t max_nonce, uint64_t *hashes_done)
{
	size_t size = (int64_t) ((int64_t) 16 * 16 * 96);
	uint64_t *wholeMatrix = _mm_malloc(size, 64);

	uint32_t _ALIGN(128) hash[8];
	uint32_t _ALIGN(128) endiandata[20];
	uint32_t *pdata = work->data;
	uint32_t *ptarget = work->target;

	const uint32_t Htarg = ptarget[7];
	const uint32_t first_nonce = pdata[19];
	uint32_t nonce = first_nonce;

	if (opt_benchmark)
		ptarget[7] = 0x0000ff;

	for (int i=0; i < 19; i++) {
		be32enc(&endiandata[i], pdata[i]);
	}

	do {
		be32enc(&endiandata[19], nonce);
		lyra2z_hash(wholeMatrix, hash, endiandata);
//		lyra2z_hash(0, hash, endiandata);

		if (hash[7] <= Htarg && fulltest(hash, ptarget)) {
			work_set_target_ratio(work, hash);
			pdata[19] = nonce;
			*hashes_done = pdata[19] - first_nonce;
			_mm_free(wholeMatrix);
			return 1;
		}
		nonce++;

	} while (nonce < max_nonce && !work_restart[thr_id].restart);

	pdata[19] = nonce;
	*hashes_done = pdata[19] - first_nonce + 1;
	_mm_free(wholeMatrix);
	return 0;
}
Example #8
//
//	Returns list of buffers specifying free space within PE sections with the specified SectionFlags.
//
PLINKED_BUFFER PeSupGetSectionFreeBuffers(
	IN	HMODULE	TargetModule,	// module to scan sections within
	IN	ULONG	SectionFlags	// section flags
	)
{
	PLINKED_BUFFER FirstBuf = NULL;
	PLINKED_BUFFER LastBuf = NULL;
	PLINKED_BUFFER NewBuf = NULL;
	PCHAR DosHeader = (PCHAR)TargetModule;
	PIMAGE_NT_HEADERS Pe = (PIMAGE_NT_HEADERS)(DosHeader + ((PIMAGE_DOS_HEADER)DosHeader)->e_lfanew);
	PIMAGE_SECTION_HEADER Section = IMAGE_FIRST_SECTION(Pe);
	ULONG NumberOfSections = Pe->FileHeader.NumberOfSections;

	do 
	{
		if (Section->Characteristics & SectionFlags)
		{
			ULONG	RealSize = _ALIGN(Section->SizeOfRawData, Pe->OptionalHeader.FileAlignment);
			ULONG	VirtualSize = max(_ALIGN(Section->Misc.VirtualSize, PAGE_SIZE), _ALIGN(RealSize, PAGE_SIZE));
			ULONG	BufferSize;

			if (Section->Characteristics & IMAGE_SCN_MEM_DISCARDABLE)			
				RealSize = 0;
			
			BufferSize = VirtualSize - RealSize;

			if ((BufferSize) && (NewBuf = (PLINKED_BUFFER)AppAlloc(sizeof(LINKED_BUFFER))))
			{
				NewBuf->Next = NULL;
				NewBuf->Buffer = DosHeader + Section->VirtualAddress + RealSize;
				NewBuf->Size = BufferSize;
				if (FirstBuf == NULL)
					FirstBuf = NewBuf;
				else
					LastBuf->Next = NewBuf;
				LastBuf = NewBuf;
			}
		}	// if (Section->Characteristics & SectionFlags)
		Section += 1;
		NumberOfSections -= 1;
	} while (NumberOfSections);
	return(FirstBuf);
}
Example #9
int ft_set_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
		const void *buf, const unsigned int buflen)
{
	struct ft_atom atom;
	void *node;
	char *p, *next;
	int nextra, depth;

	node = ft_node_ph2node(cxt, phandle);
	if (node == NULL)
		return -1;

	depth = 0;
	p = node;

	while ((next = ft_next(cxt, p, &atom)) != NULL) {
		switch (atom.tag) {
		case OF_DT_BEGIN_NODE:
			++depth;
			break;
		case OF_DT_END_NODE:
			if (--depth > 0)
				break;
			/* haven't found the property, insert here */
			cxt->p = p;
			return ft_prop(cxt, propname, buf, buflen);
		case OF_DT_PROP:
			if ((depth != 1) || strcmp(atom.name, propname))
				break;
			/* found an existing property, overwrite it */
			nextra = _ALIGN(buflen, 4) - _ALIGN(atom.size, 4);
			cxt->p = atom.data;
			if (nextra && !ft_make_space(cxt, &cxt->p, FT_STRUCT,
						nextra))
				return -1;
			*(u32 *) (cxt->p - 8) = cpu_to_be32(buflen);
			ft_put_bin(cxt, buf, buflen);
			return 0;
		}
		p = next;
	}
	return -1;
}
Example #10
int scanhash_x16r(int thr_id, struct work *work, uint32_t max_nonce, uint64_t *hashes_done)
{
	uint32_t _ALIGN(128) hash32[8];
	uint32_t _ALIGN(128) endiandata[20];
	uint32_t *pdata = work->data;
	uint32_t *ptarget = work->target;
	const uint32_t Htarg = ptarget[7];
	const uint32_t first_nonce = pdata[19];
	uint32_t nonce = first_nonce;
	volatile uint8_t *restart = &(work_restart[thr_id].restart);

	for (int k=0; k < 19; k++)
		be32enc(&endiandata[k], pdata[k]);

	if (s_ntime != pdata[17]) {
		uint32_t ntime = swab32(pdata[17]);
		getAlgoString((const char*) (&endiandata[1]), hashOrder);
		s_ntime = ntime;
		if (opt_debug && !thr_id) applog(LOG_DEBUG, "hash order %s (%08x)", hashOrder, ntime);
	}

	if (opt_benchmark)
		ptarget[7] = 0x0cff;

	do {
		be32enc(&endiandata[19], nonce);
		x16r_hash(hash32, endiandata);

		if (hash32[7] <= Htarg && fulltest(hash32, ptarget)) {
			work_set_target_ratio(work, hash32);
			pdata[19] = nonce;
			*hashes_done = pdata[19] - first_nonce;
			return 1;
		}
		nonce++;

	} while (nonce < max_nonce && !(*restart));

	pdata[19] = nonce;
	*hashes_done = pdata[19] - first_nonce + 1;
	return 0;
}
Example #11
int ft_begin_node(struct ft_cxt *cxt, const char *name)
{
	unsigned long nlen = strlen(name) + 1;
	unsigned long len = 8 + _ALIGN(nlen, 4);

	if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, len))
		return -1;
	ft_put_word(cxt, OF_DT_BEGIN_NODE);
	ft_put_bin(cxt, name, strlen(name) + 1);
	return 0;
}
Example #12
unsigned long __init of_get_flat_dt_root(void)
{
	unsigned long p = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;

	while(*((u32 *)p) == OF_DT_NOP)
		p += 4;
	BUG_ON (*((u32 *)p) != OF_DT_BEGIN_NODE);
	p += 4;
	return _ALIGN(p + strlen((char *)p) + 1, 4);
}
Example #13
static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
				       unsigned long align)
{
	void *res;

	*mem = _ALIGN(*mem, align);
	res = (void *)*mem;
	*mem += size;

	return res;
}
Example #14
// hash exactly 64 bytes (i.e., the sha256 block size)
static void sha256_hash512(uint32_t *hash, const uint32_t *data)
{
	uint32_t _ALIGN(64) S[16];
	uint32_t _ALIGN(64) T[64];
	uchar _ALIGN(64) E[64*4] = { 0 };
	int i;

	sha256_init(S);

	for (i = 0; i < 16; i++)
		T[i] = be32dec(&data[i]);
	sha256_transform_volatile(S, T);

	E[3]  = 0x80;
	E[61] = 0x02; // T[15] = 8 * 64 => 0x200;
	sha256_transform_volatile(S, (uint32_t*)E);

	for (i = 0; i < 8; i++)
		be32enc(&hash[i], S[i]);
}
Example #15
int scanhash_blake(int thr_id, struct work *work, uint32_t max_nonce, uint64_t *hashes_done)
{
	uint32_t _ALIGN(128) hash32[8];
	uint32_t _ALIGN(128) endiandata[20];
	uint32_t *pdata = work->data;
	uint32_t *ptarget = work->target;

	const uint32_t first_nonce = pdata[19];
	const uint32_t HTarget = opt_benchmark ? 0x7f : ptarget[7];

	uint32_t n = first_nonce;

	ctx_midstate_done = false;

	// we need big endian data...
	for (int kk=0; kk < 19; kk++) {
		be32enc(&endiandata[kk], pdata[kk]);
	}

#ifdef DEBUG_ALGO
	applog(LOG_DEBUG,"[%d] Target=%08x %08x", thr_id, ptarget[6], ptarget[7]);
#endif

	do {
		be32enc(&endiandata[19], n);
		blakehash(hash32, endiandata);

		if (hash32[7] <= HTarget && fulltest(hash32, ptarget)) {
			work_set_target_ratio(work, hash32);
			*hashes_done = n - first_nonce + 1;
			return 1;
		}

		n++; pdata[19] = n;

	} while (n < max_nonce && !work_restart[thr_id].restart);

	*hashes_done = n - first_nonce + 1;
	pdata[19] = n;
	return 0;
}
Example #16
/* Print log debug data.  This appears after the location code.
 * We limit the number of debug logs in case the data is somehow corrupt.
 */
static void printk_log_debug(char *buf)
{
	unsigned char *p = (unsigned char *)_ALIGN((unsigned long)buf, 4);
	int len, n, logged;

	logged = 0;
	while ((logged < MAX_LOG_DEBUG) && (len = ((p[0] << 8) | p[1])) >= 4) {
		if (len > MAX_LOG_DEBUG_LEN)
			len = MAX_LOG_DEBUG_LEN;	/* bound it */
		printk("RTAS: Log Debug: %c%c ", p[2], p[3]);
		for (n=4; n < len; n++)
			printk("%02x", p[n]);
		printk("\n");
		p = (unsigned char *)_ALIGN((unsigned long)p+len, 4);
		logged++;
		if (len == MAX_LOG_DEBUG_LEN)
			return;	/* no point continuing */
	}
	if (logged == 0)
		printk("RTAS: no log debug data present\n");
}
Example #17
static void ft_put_bin(struct ft_cxt *cxt, const void *data, unsigned int sz)
{
	unsigned long sza = _ALIGN(sz, 4);

	/* zero out the alignment gap if necessary */
	if (sz < sza)
		*(u32 *) (cxt->p + sza - 4) = 0;

	/* copy in the data */
	memcpy(cxt->p, data, sz);

	cxt->p += sza;
}
Example #18
int scanhash_phi1612(int thr_id, struct work *work, uint32_t max_nonce, uint64_t *hashes_done)
{
	uint32_t _ALIGN(128) hash[8];
	uint32_t _ALIGN(128) endiandata[20];
	uint32_t *pdata = work->data;
	uint32_t *ptarget = work->target;

	const uint32_t Htarg = ptarget[7];
	const uint32_t first_nonce = pdata[19];
	uint32_t n = first_nonce;

	if(opt_benchmark){
		ptarget[7] = 0x00ff;
	}

	for (int i=0; i < 19; i++) {
		be32enc(&endiandata[i], pdata[i]);
	}

	do {
		be32enc(&endiandata[19], n);
		phi1612_hash(hash, endiandata);

		if (hash[7] < Htarg && fulltest(hash, ptarget)) {
			work_set_target_ratio(work, hash);
			*hashes_done = n - first_nonce + 1;
			pdata[19] = n;
			return 1;
		}
		n++;

	} while (n < max_nonce && !work_restart[thr_id].restart);

	*hashes_done = n - first_nonce + 1;
	pdata[19] = n;

	return 0;
}
Example #19
static void sha256_hash(unsigned char *hash, const unsigned char *data, int len)
{
	uint32_t _ALIGN(64) S[16];
	uint32_t _ALIGN(64) T[64];
	int i, r;

	sha256_init(S);
	for (r = len; r > -9; r -= 64) {
		if (r < 64)
			memset(T, 0, 64);
		memcpy(T, data + len - r, r > 64 ? 64 : (r < 0 ? 0 : r));
		if (r >= 0 && r < 64)
			((unsigned char *)T)[r] = 0x80;
		for (i = 0; i < 16; i++)
			T[i] = be32dec(T + i);
		if (r < 56)
			T[15] = 8 * len;
		//sha256_transform(S, T, 0);
		sha256_transform_volatile(S, T);
	}
	for (i = 0; i < 8; i++)
		be32enc((uint32_t *)hash + i, S[i]);
}
Example #20
void zfree(void *x, void *addr, unsigned nb)
{
    struct memchunk *mp = addr;

    nb = _ALIGN(nb, sizeof(struct memchunk));
    heap_use -= nb;
    if (avail_ram == addr + nb) {
        avail_ram = addr;
        return;
    }
    mp->size = nb;
    mp->next = freechunks;
    freechunks = mp;
}
Example #21
int scanhash_veltor(int thr_id, struct work *work, uint32_t max_nonce, uint64_t *hashes_done)
{
	uint32_t _ALIGN(128) hash[8];
	uint32_t _ALIGN(128) endiandata[20];
	uint32_t *pdata = work->data;
	uint32_t *ptarget = work->target;

	const uint32_t Htarg = ptarget[7];
	const uint32_t first_nonce = pdata[19];
	uint32_t nonce = first_nonce;
	volatile uint8_t *restart = &(work_restart[thr_id].restart);

	if (opt_benchmark)
		ptarget[7] = 0x0cff;

	// we need big-endian data...
	for (int i=0; i < 19; i++) {
		be32enc(&endiandata[i], pdata[i]);
	}
	do {
		be32enc(&endiandata[19], nonce);
		veltorhash(hash, endiandata);

		if (hash[7] <= Htarg && fulltest(hash, ptarget)) {
			work_set_target_ratio(work, hash);
			pdata[19] = nonce;
			*hashes_done = pdata[19] - first_nonce;
			return 1;
		}
		nonce++;

	} while (nonce < max_nonce && !(*restart));

	pdata[19] = nonce;
	*hashes_done = pdata[19] - first_nonce + 1;
	return 0;
}
Example #22
/**
 * This function can be used within a scan_flattened_dt callback to get
 * access to properties
 */
void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
				 unsigned long *size)
{
	unsigned long p = node;

	do {
		u32 tag = *((u32 *)p);
		u32 sz, noff;
		const char *nstr;

		p += 4;
		if (tag == OF_DT_NOP)
			continue;
		if (tag != OF_DT_PROP)
			return NULL;

		sz = *((u32 *)p);
		noff = *((u32 *)(p + 4));
		p += 8;
		if (initial_boot_params->version < 0x10)
			p = _ALIGN(p, sz >= 8 ? 8 : 4);

		nstr = find_flat_dt_string(noff);
		if (nstr == NULL) {
			printk(KERN_WARNING "Can't find property index"
			       " name !\n");
			return NULL;
		}
		if (strcmp(name, nstr) == 0) {
			if (size)
				*size = sz;
			return (void *)p;
		}
		p += sz;
		p = _ALIGN(p, 4);
	} while(1);
}
Example #23
int scanhash_pluck(int thr_id, struct work *work, uint32_t max_nonce,
	uint64_t *hashes_done)
{
	uint32_t *pdata = work->data;
	uint32_t *ptarget = work->target;
	uint32_t _ALIGN(64) endiandata[20];
	uint32_t _ALIGN(64) hash[8];
	const uint32_t first_nonce = pdata[19];
	volatile uint8_t *restart = &(work_restart[thr_id].restart);
	uint32_t n = first_nonce;

	if (opt_benchmark)
		((uint32_t*)ptarget)[7] = 0x0ffff;

	for (int i = 0; i < 19; i++)
		be32enc(&endiandata[i], pdata[i]);

	const uint32_t Htarg = ptarget[7];
	do {
		//be32enc(&endiandata[19], n);
		endiandata[19] = n;
		pluck_hash(hash, endiandata, scratchbuf, opt_pluck_n);

		if (hash[7] <= Htarg && fulltest(hash, ptarget))
		{
			*hashes_done = n - first_nonce + 1;
			pdata[19] = htobe32(endiandata[19]);
			return 1;
		}
		n++;
	} while (n < max_nonce && !(*restart));

	*hashes_done = n - first_nonce + 1;
	pdata[19] = n;
	return 0;
}
Example #24
void skein2hash(void *output, const void *input)
{
	uint32_t _ALIGN(64) hash[16];
	sph_skein512_context ctx_skein;

	sph_skein512_init(&ctx_skein);
	sph_skein512(&ctx_skein, input, 80);
	sph_skein512_close(&ctx_skein, hash);

	sph_skein512_init(&ctx_skein);
	sph_skein512(&ctx_skein, hash, 64);
	sph_skein512_close(&ctx_skein, hash);

	memcpy(output, (void*) hash, 32);
}
Example #25
static status_t
add_ancillary_data(net_socket* socket, ancillary_data_container* container,
	void* data, size_t dataLen)
{
	cmsghdr* header = (cmsghdr*)data;

	while (dataLen > 0) {
		if (header->cmsg_len < sizeof(cmsghdr) || header->cmsg_len > dataLen)
			return B_BAD_VALUE;

		if (socket->first_info->add_ancillary_data == NULL)
			return B_NOT_SUPPORTED;

		status_t status = socket->first_info->add_ancillary_data(
			socket->first_protocol, container, header);
		if (status != B_OK)
			return status;

		dataLen -= _ALIGN(header->cmsg_len);
		header = (cmsghdr*)((uint8*)header + _ALIGN(header->cmsg_len));
	}

	return B_OK;
}
Example #26
static void droplp_hash_pok(void *output, uint32_t *pdata, const uint32_t version)
{
	uint32_t _ALIGN(64) hash[8];
	uint32_t pok;

	pdata[0] = version;
	droplp_hash(hash, pdata);

	// fill PoK
	pok = version | (hash[0] & POK_DATA_MASK);
	if (pdata[0] != pok) {
		pdata[0] = pok;
		droplp_hash(hash, pdata);
	}
	memcpy(output, hash, 32);
}
Example #27
static void expand_buf(int minexpand)
{
	int size = fdt_totalsize(fdt);
	int rc;

	size = _ALIGN(size + minexpand, EXPAND_GRANULARITY);
	buf = platform_ops.realloc(buf, size);
	if (!buf)
		fatal("Couldn't find %d bytes to expand device tree\n\r", size);
	rc = fdt_open_into(fdt, buf, size);
	if (rc != 0)
		fatal("Couldn't expand fdt into new buffer: %s\n\r",
		      fdt_strerror(rc));

	fdt = buf;
}
Example #28
void myriadhash(void *state, const void *input)
{
	uint32_t _ALIGN(64) hash[16];
	sph_groestl512_context ctx_groestl;
	SHA256_CTX sha256;

	sph_groestl512_init(&ctx_groestl);
	sph_groestl512(&ctx_groestl, input, 80);
	sph_groestl512_close(&ctx_groestl, hash);

	SHA256_Init(&sha256);
	SHA256_Update(&sha256,(unsigned char *)hash, 64);
	SHA256_Final((unsigned char *)hash, &sha256);

	memcpy(state, hash, 32);
}
Example #29
/* Copy the tree to a newly-allocated region and put things in order */
static int ft_reorder(struct ft_cxt *cxt, int nextra)
{
	unsigned long tot;
	enum ft_rgn_id r;
	char *p, *pend;
	int stroff;

	tot = HDR_SIZE + EXPAND_INCR;
	for (r = FT_RSVMAP; r <= FT_STRINGS; ++r)
		tot += cxt->rgn[r].size;
	if (nextra > 0)
		tot += nextra;
	tot = _ALIGN(tot, 8);

	if (!cxt->realloc)
		return 0;
	p = cxt->realloc(NULL, tot);
	if (!p)
		return 0;

	memcpy(p, cxt->bph, sizeof(struct boot_param_header));
	/* offsets get fixed up later */

	cxt->bph = (struct boot_param_header *)p;
	cxt->max_size = tot;
	pend = p + tot;
	p += HDR_SIZE;

	memcpy(p, cxt->rgn[FT_RSVMAP].start, cxt->rgn[FT_RSVMAP].size);
	cxt->rgn[FT_RSVMAP].start = p;
	p += cxt->rgn[FT_RSVMAP].size;

	memcpy(p, cxt->rgn[FT_STRUCT].start, cxt->rgn[FT_STRUCT].size);
	ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start,
			p - cxt->rgn[FT_STRUCT].start);
	cxt->p += p - cxt->rgn[FT_STRUCT].start;
	cxt->rgn[FT_STRUCT].start = p;

	p = pend - cxt->rgn[FT_STRINGS].size;
	memcpy(p, cxt->rgn[FT_STRINGS].start, cxt->rgn[FT_STRINGS].size);
	stroff = cxt->str_anchor - cxt->rgn[FT_STRINGS].start;
	cxt->rgn[FT_STRINGS].start = p;
	cxt->str_anchor = p + stroff;

	cxt->isordered = 1;
	return 1;
}
Example #30
/* Depending on whether this is called from iSeries or pSeries setup
 * code, the location of the msChunks struct may or may not have
 * to be reloc'd, so we force the caller to do that for us by passing
 * in a pointer to the structure.
 */
unsigned long
msChunks_alloc(unsigned long mem, unsigned long num_chunks, unsigned long chunk_size)
{
	unsigned long offset = reloc_offset();
	struct msChunks *_msChunks = PTRRELOC(&msChunks);

	_msChunks->num_chunks  = num_chunks;
	_msChunks->chunk_size  = chunk_size;
	_msChunks->chunk_shift = __ilog2(chunk_size);
	_msChunks->chunk_mask  = (1UL<<_msChunks->chunk_shift)-1;

	mem = _ALIGN(mem, sizeof(msChunks_entry));
	_msChunks->abs = (msChunks_entry *)(mem + offset);
	mem += num_chunks * sizeof(msChunks_entry);

	return mem;
}