Code example #1
File: proc.c Project: xuchiheng/ucore_plus
int do_mprotect(void *addr, size_t len, int prot)
{

	struct mm_struct *mm = current->mm;
	assert(mm != NULL);
	if (len == 0) {
		return -E_INVAL;
	}
	uintptr_t start = ROUNDDOWN(addr, PGSIZE);
	uintptr_t end = ROUNDUP(addr + len, PGSIZE);

	int ret = -E_INVAL;
	lock_mm(mm);

	while (1) {
		struct vma_struct *vma = find_vma(mm, start);
		uintptr_t last_end;
		if (vma != NULL) {
			last_end = vma->vm_end;
		}
		if (vma == NULL) {
			goto out;
		} else if (vma->vm_start == start && vma->vm_end == end) {
			if (prot & PROT_WRITE) {
				vma->vm_flags |= VM_WRITE;
			} else {
				vma->vm_flags &= ~VM_WRITE;
			}
		} else {
			uintptr_t this_end =
			    (end <= vma->vm_end) ? end : vma->vm_end;
			uintptr_t this_start =
			    (start >= vma->vm_start) ? start : vma->vm_start;

			struct mapped_file_struct mfile = vma->mfile;
			mfile.offset += this_start - vma->vm_start;
			uint32_t flags = vma->vm_flags;
			if ((ret =
			     mm_unmap_keep_pages(mm, this_start,
						 this_end - this_start)) != 0) {
				goto out;
			}
			if (prot & PROT_WRITE) {
				flags |= VM_WRITE;
			} else {
				flags &= ~VM_WRITE;
			}
			if ((ret =
			     mm_map(mm, this_start, this_end - this_start,
				    flags, &vma)) != 0) {
				goto out;
			}
			vma->mfile = mfile;
			if (vma->mfile.file != NULL) {
				filemap_acquire(mfile.file);
			}
		}

		ret = 0;

		if (end <= last_end)
			break;
		start = last_end;
	}

out:
	unlock_mm(mm);
	return ret;
}
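
Every example on this page leans on ROUNDUP/ROUNDDOWN-style alignment macros whose definitions live in each project's own headers and are not shown here. As a minimal sketch, assuming the alignment argument is a power of two (example #9 below defines its own single-argument variant along the same lines):

/* Sketch only -- each project ships its own definitions.
 * Valid only when a is a power of two. */
#define ROUNDDOWN(x, a) ((uintptr_t)(x) & ~((uintptr_t)(a) - 1))
#define ROUNDUP(x, a)   (((uintptr_t)(x) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))

In do_mprotect above, these expand [addr, addr + len) outward to whole pages, since protections can only change with page granularity.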
Code example #2
// HEADSIGNATURE + BUFFERSIZE + BUFFERID + PFILENAME + LINENUMBER + BUFFER + TAILSIGNATURE

//All the header info must be ULONGs,
//so that the user buffer falls on a word boundary.
//The tail must be a byte: if it were a ULONG it would
//also require a word boundary, but the user's buffer could
//be an odd number of bytes, so instead of rounding up, just use a BYTE.

const ULONG		HEADSIZE		= sizeof(ULONG);  //HEADSIGNATURE
const SIZE_T	LENGTHSIZE		= sizeof(SIZE_T); //BUFFERSIZE
const ULONG		IDSIZE			= sizeof(ULONG);  //BUFFERID
const ULONG		FILENAMESIZE	= sizeof(WCHAR*); //PFILENAME
const ULONG		LINENUMBERSIZE	= sizeof(ULONG);  //LINENUMBER
const ULONG		TAILSIZE		= sizeof(BYTE);	  //TAILSIGNATURE

const ULONG HEADERSIZE = (ULONG)ROUNDUP(HEADSIZE + LENGTHSIZE + IDSIZE + FILENAMESIZE + LINENUMBERSIZE);
const ULONG FOOTERSIZE = TAILSIZE;

const BYTE  HEADSIGN = '{';
const BYTE  TAILSIGN = '}';

const BYTE  ALLOCSIGN = '$';
const BYTE  FREESIGN  = 'Z';

#define HEAD_OFFSET(pActual)		((BYTE*)pActual)
#define TAIL_OFFSET(pActual)		(USERS_OFFSET(pActual)+BUFFER_LENGTH(pActual))

#define USERS_OFFSET(pActual)		(HEAD_OFFSET(pActual) + HEADERSIZE)
#define HEADER_OFFSET(pRequest) 	((BYTE*)(pRequest) - HEADERSIZE)	

#define LENGTH_OFFSET(pActual)		(HEAD_OFFSET(pActual) + HEADSIZE)	
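
To see how the offsets above fit together, here is a hypothetical guard check built on these macros -- a sketch, not part of the original source. BUFFER_LENGTH (which would read the stored BUFFERSIZE field) and the single-argument ROUNDUP used for HEADERSIZE are assumptions, since neither is shown in this excerpt:

// Sketch: verify both guard signatures around a tracked allocation.
// Assumes the head signature occupies the leading ULONG slot and
// BUFFER_LENGTH(pActual) returns the recorded user-buffer size.
BOOL CheckGuards(void* pActual)
{
	return *(ULONG*)HEAD_OFFSET(pActual) == (ULONG)HEADSIGN  // '{' before the header
	    && *TAIL_OFFSET(pActual) == TAILSIGN;                // '}' after the user bytes
}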
Code example #3
File: asm.c Project: qioixiy/harvey
void
asmmeminit(void)
{
    Proc *up = externup();
    int i, l;
    Asm* assem;
    PTE *pte, *pml4;
    uintptr va;
    uintmem hi, lo, mem, nextmem, pa;
#ifdef ConfCrap
    int cx;
#endif /* ConfCrap */

    assert(!((sys->vmunmapped|sys->vmend) & machp()->pgszmask[1]));

    if((pa = mmuphysaddr(sys->vmunused)) == ~0)
        panic("asmmeminit 1");
    pa += sys->vmunmapped - sys->vmunused;
    mem = asmalloc(pa, sys->vmend - sys->vmunmapped, 1, 0);
    if(mem != pa)
        panic("asmmeminit 2");
    DBG("pa %#llux mem %#llux\n", pa, mem);

    /* assume already 2MiB aligned*/
    assert(ALIGNED(sys->vmunmapped, 2*MiB));
    pml4 = UINT2PTR(machp()->pml4->va);
    while(sys->vmunmapped < sys->vmend) {
        l = mmuwalk(pml4, sys->vmunmapped, 1, &pte, asmwalkalloc);
        DBG("%#p l %d\n", sys->vmunmapped, l);
        *pte = pa|PtePS|PteRW|PteP;
        sys->vmunmapped += 2*MiB;
        pa += 2*MiB;
    }

#ifdef ConfCrap
    cx = 0;
#endif /* ConfCrap */
    for(assem = asmlist; assem != nil; assem = assem->next) {
        if(assem->type != AsmMEMORY)
            continue;
        va = KSEG2+assem->addr;
        print("asm: addr %#P end %#P type %d size %P\n",
              assem->addr, assem->addr+assem->size,
              assem->type, assem->size);

        lo = assem->addr;
        hi = assem->addr+assem->size;
        /* Convert a range into pages */
        for(mem = lo; mem < hi; mem = nextmem) {
            nextmem = (mem + PGLSZ(0)) & ~machp()->pgszmask[0];

            /* Try large pages first */
            for(i = m->npgsz - 1; i >= 0; i--) {
                if((mem & machp()->pgszmask[i]) != 0)
                    continue;
                if(mem + PGLSZ(i) > hi)
                    continue;
                /* This page fits entirely within the range. */
                /* Mark it as usable */
                if((l = mmuwalk(pml4, va, i, &pte, asmwalkalloc)) < 0)
                    panic("asmmeminit 3");

                *pte = mem|PteRW|PteP;
                if(l > 0)
                    *pte |= PtePS;

                nextmem = mem + PGLSZ(i);
                va += PGLSZ(i);
                npg[i]++;

                break;
            }
        }

#ifdef ConfCrap
        /*
         * Fill in conf crap.
         */
        if(cx >= nelem(conf.mem))
            continue;
        lo = ROUNDUP(assem->addr, PGSZ);
//if(lo >= 600ull*MiB)
//    continue;
        conf.mem[cx].base = lo;
        hi = ROUNDDN(hi, PGSZ);
//if(hi > 600ull*MiB)
//  hi = 600*MiB;
        conf.mem[cx].npage = (hi - lo)/PGSZ;
        conf.npage += conf.mem[cx].npage;
        print("cm %d: addr %#llux npage %lud\n",
              cx, conf.mem[cx].base, conf.mem[cx].npage);
        cx++;
#endif /* ConfCrap */
    }
    print("%d %d %d\n", npg[0], npg[1], npg[2]);

#ifdef ConfCrap
    /*
     * Fill in more conf crap.
     * This is why I hate Plan 9.
     */
    conf.upages = conf.npage;
    i = (sys->vmend - sys->vmstart)/PGSZ;		/* close enough */
    conf.ialloc = (i/2)*PGSZ;
    print("npage %llud upage %lud kpage %d\n",
          conf.npage, conf.upages, i);

#endif /* ConfCrap */
}
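
The inner loop above applies a "largest page first" policy: take the biggest page size whose alignment and extent both fit the remaining range, and fall back to smaller ones otherwise. The same decision as a standalone helper -- a sketch, not taken from the harvey tree, assuming PGLSZ(i) is a power of two and pgszmask[i] == PGLSZ(i) - 1:

static int
pickpagelevel(uintmem mem, uintmem hi, int npgsz)
{
    int i;

    for(i = npgsz - 1; i >= 0; i--){
        if((mem & (PGLSZ(i) - 1)) != 0)    /* not aligned to this size */
            continue;
        if(mem + PGLSZ(i) > hi)            /* would overrun the range */
            continue;
        return i;                          /* largest level that fits */
    }
    return -1;                             /* nothing fits; skip this step */
}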
Code example #4
File: nic.c Project: packetngin/rtos
Packet* nic_alloc(NIC* nic, uint16_t size) {
	uint8_t* bitmap = (void*)nic + nic->pool.bitmap;
	uint32_t count = nic->pool.count;
	void* pool = (void*)nic + nic->pool.pool;

	uint32_t size2 = sizeof(Packet) + nic->padding_head + size + nic->padding_tail;
	uint8_t req = (ROUNDUP(size2, NIC_CHUNK_SIZE)) / NIC_CHUNK_SIZE;
	uint32_t index = nic->pool.index;

	lock_lock(&nic->pool.lock);

	// Find tail
	uint32_t idx = 0;
	// idx + req <= count avoids unsigned underflow when count < req
	for(idx = index; idx + req <= count; idx++) {
		for(uint32_t j = 0; j < req; j++) {
			if(bitmap[idx + j] == 0) {
				continue;
			} else {
				idx += j + bitmap[idx + j];
				goto next;
			}
		}

		goto found;
next:
		;
	}

	// Find head
	// idx + req < index avoids unsigned underflow when index < req
	for(idx = 0; idx + req < index; idx++) {
		for(uint32_t j = 0; j < req; j++) {
			if(bitmap[idx + j] == 0) {
				continue;
			} else {
				// Skip past the occupied run and keep searching;
				// jumping straight to notfound would abandon the
				// head search at the first occupied chunk
				idx += j + bitmap[idx + j];
				goto next2;
			}
		}

		goto found;
next2:
		;
	}

notfound:
	// Not found
	lock_unlock(&nic->pool.lock);
	return NULL;

found:
	nic->pool.index = idx + req;
	for(uint32_t k = 0; k < req; k++) {
		bitmap[idx + k] = req - k;
	}

	nic->pool.used += req;

	lock_unlock(&nic->pool.lock);

	Packet* packet = pool + (idx * NIC_CHUNK_SIZE);
	packet->time = 0;
	packet->start = 0;
	packet->end = 0;
	packet->size = (req * NIC_CHUNK_SIZE) - sizeof(Packet);

	return packet;
}
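
Each bitmap byte in an allocated run stores its distance to the end of that run (req - k), which is exactly what lets the search loops above leap forward with idx += j + bitmap[idx + j] instead of scanning byte by byte. Freeing then amounts to zeroing the run. A hypothetical counterpart (the real nic_free is not part of this excerpt):

void nic_free(NIC* nic, Packet* packet) {
	uint8_t* bitmap = (void*)nic + nic->pool.bitmap;
	void* pool = (void*)nic + nic->pool.pool;

	uint32_t idx = ((void*)packet - pool) / NIC_CHUNK_SIZE;

	lock_lock(&nic->pool.lock);

	uint8_t req = bitmap[idx];	// first byte of a run holds its full length
	for(uint32_t k = 0; k < req; k++)
		bitmap[idx + k] = 0;

	nic->pool.used -= req;

	lock_unlock(&nic->pool.lock);
}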
Code example #5
File: arch_thread.c Project: HTshandou/newos
int
arch_setup_signal_frame(struct thread *t, struct sigaction *sa, int sig, int sig_mask)
{
#warning implement arch_setup_signal_frame
	PANIC_UNIMPLEMENTED();
#if 0
	struct iframe *frame = x86_64_get_curr_iframe();
	uint32 *stack_ptr;
	uint32 *code_ptr;
	uint32 *regs_ptr;
	struct vregs regs;
	uint32 stack_buf[6];
	int err;

	/* do some quick sanity checks */
	ASSERT(frame);
	ASSERT(is_user_address(frame->user_sp));

//	dprintf("arch_setup_signal_frame: thread 0x%x, frame %p, user_esp 0x%x, sig %d\n",
//		t->id, frame, frame->user_esp, sig);

	if ((int)frame->orig_eax >= 0) {
		// we're coming from a syscall
		if (((int)frame->eax == ERR_INTERRUPTED) && (sa->sa_flags & SA_RESTART)) {
			dprintf("### restarting syscall %d after signal %d\n", frame->orig_eax, sig);
			frame->eax = frame->orig_eax;
			frame->edx = frame->orig_edx;
			frame->eip -= 2;
		}
	}

	// start stuffing stuff on the user stack
	stack_ptr = (uint32 *)frame->user_sp;

	// store the saved regs onto the user stack
	stack_ptr -= ROUNDUP(sizeof(struct vregs)/4, 4);
	regs_ptr = stack_ptr;
	regs.eip = frame->eip;
	regs.eflags = frame->flags;
	regs.eax = frame->eax;
	regs.ecx = frame->ecx;
	regs.edx = frame->edx;
	regs.esp = frame->esp;
	regs._reserved_1 = frame->user_sp;
	regs._reserved_2[0] = frame->edi;
	regs._reserved_2[1] = frame->esi;
	regs._reserved_2[2] = frame->ebp;
	x86_64_fsave((void *)(&regs.xregs));
	
	err = user_memcpy(stack_ptr, &regs, sizeof(regs));
	if(err < 0)
		return err;

	// now store a code snippet on the stack
	stack_ptr -= ((uint32)x86_64_end_return_from_signal - (uint32)x86_64_return_from_signal)/4;
	code_ptr = stack_ptr;
	err = user_memcpy(code_ptr, x86_64_return_from_signal,
		((uint32)x86_64_end_return_from_signal - (uint32)x86_64_return_from_signal));
	if(err < 0)
		return err;

	// now set up the final part
	stack_buf[0] = (uint32)code_ptr;	// return address when sa_handler done
	stack_buf[1] = sig;					// first argument to sa_handler
	stack_buf[2] = (uint32)sa->sa_userdata;// second argument to sa_handler
	stack_buf[3] = (uint32)regs_ptr;	// third argument to sa_handler

	stack_buf[4] = sig_mask;			// Old signal mask to restore
	stack_buf[5] = (uint32)regs_ptr;	// Int frame + extra regs to restore

	stack_ptr -= sizeof(stack_buf)/4;

	err = user_memcpy(stack_ptr, stack_buf, sizeof(stack_buf));
	if(err < 0)
		return err;
	
	frame->user_esp = (uint32)stack_ptr;
	frame->eip = (uint32)sa->sa_handler;

	return NO_ERROR;
#endif
}
Code example #6
File: osystem_gfx.cpp Project: 0xf1sh/scummvm
void OSystem_Wii::setMouseCursor(const void *buf, uint w, uint h, int hotspotX,
									int hotspotY, uint32 keycolor,
									bool dontScale,
									const Graphics::PixelFormat *format) {
	gfx_tex_format_t tex_format = GFX_TF_PALETTE_RGB5A3;
	uint tw, th;
	bool tmpBuf = false;
	uint32 oldKeycolor = _mouseKeyColor;

#ifdef USE_RGB_COLOR
	if (!format)
		_pfCursor = Graphics::PixelFormat::createFormatCLUT8();
	else
		_pfCursor = *format;

	if (_pfCursor.bytesPerPixel > 1) {
		tex_format = GFX_TF_RGB5A3;
		_mouseKeyColor = keycolor & 0xffff;
		tw = ROUNDUP(w, 4);
		th = ROUNDUP(h, 4);

		if (_pfCursor != _pfRGB3444)
			tmpBuf = true;
	} else {
#endif
		_mouseKeyColor = keycolor & 0xff;
		tw = ROUNDUP(w, 8);
		th = ROUNDUP(h, 4);
#ifdef USE_RGB_COLOR
	}
#endif

	if (!gfx_tex_init(&_texMouse, tex_format, TLUT_MOUSE, tw, th)) {
		printf("could not init the mouse texture\n");
		::abort();
	}

	gfx_tex_set_bilinear_filter(&_texMouse, _bilinearFilter);

	if ((tw != w) || (th != h))
		tmpBuf = true;

	if (!tmpBuf) {
		gfx_tex_convert(&_texMouse, (const byte *)buf);
	} else {
		u8 bpp = _texMouse.bpp >> 3;
		byte *tmp = (byte *) malloc(tw * th * bpp);

		if (!tmp) {
			printf("could not alloc temp cursor buffer\n");
			::abort();
		}

		if (bpp > 1)
			memset(tmp, 0, tw * th * bpp);
		else
			memset(tmp, _mouseKeyColor, tw * th);

#ifdef USE_RGB_COLOR
		if (bpp > 1) {
			if (!Graphics::crossBlit(tmp, (const byte *)buf,
										tw * _pfRGB3444.bytesPerPixel,
										w * _pfCursor.bytesPerPixel,
										tw, th, _pfRGB3444, _pfCursor)) {
				printf("crossBlit failed (cursor)\n");
				::abort();
			}

			// nasty, shouldn't the frontend set the alpha channel?
			u16 *s = (u16 *) buf;
			u16 *d = (u16 *) tmp;
			for (u16 y = 0; y < h; ++y) {
				for (u16 x = 0; x < w; ++x) {
					if (*s++ == _mouseKeyColor)
						*d++ &= ~(7 << 12);
					else
						d++;
				}

				d += tw - w;
			}
		} else {
#endif
			byte *dst = tmp;
			const byte *src = (const byte *)buf;
			do {
				memcpy(dst, src, w * bpp);
				src += w * bpp;
				dst += tw * bpp;
			} while (--h);
#ifdef USE_RGB_COLOR
		}
#endif

		gfx_tex_convert(&_texMouse, tmp);
		free(tmp);
	}

	_mouseHotspotX = hotspotX;
	_mouseHotspotY = hotspotY;
	_cursorDontScale = dontScale;

	if ((_texMouse.palette) && (oldKeycolor != _mouseKeyColor))
		_cursorPaletteDirty = true;
}
Code example #7
File: wmifinfo.c Project: rnjacobs/dockapps
int getifinfo(char *ifname, struct ifinfo_t *info)
{
	struct ifreq ifr;
	int r;
	struct sockaddr_in *sa;

#ifdef linux
	static FILE *froute = NULL;
	static FILE *fwireless = NULL;
	static FILE *fdev = NULL;
#elif defined(__OpenBSD__)
	struct ifreq ibuf[32];
	struct ifconf ifc;
	struct ifreq *ifrp, *ifend;
#endif

	char parent[16];
	char buf[1024];
	char *p;
	char a[16];
	int b,c,d;

#ifdef linux
	if(froute == NULL) froute = fopen("/proc/net/route", "r");
	if(fwireless == NULL) fwireless = fopen("/proc/net/wireless", "r");
	if(fdev == NULL) fdev = fopen("/proc/net/dev", "r");
#endif


	strcpy(parent, ifname);
	p=strchr(parent, ':');
	if(p) *p=0;

	strcpy(info->id, ifname);

	strcpy(ifr.ifr_name, ifname);

	// Get status (UP/DOWN)

	if(ioctl(fd, SIOCGIFFLAGS, &ifr) != -1) {
		sa = (struct sockaddr_in *)&(ifr.ifr_addr);
		info->state = (ifr.ifr_flags & 1) ? 1 : 0;
	} else {
		info->state = 0;
	}

	// Get mac address

#ifdef linux
	if(ioctl(fd, SIOCGIFHWADDR, &ifr) != -1) {
		memcpy(info->hw, ifr.ifr_hwaddr.sa_data, 6);
	} else {
		memset(info->hw, 0, 6);
	}
#elif defined(__OpenBSD__)
	ifc.ifc_len = sizeof(ibuf);
	ifc.ifc_buf = (caddr_t) ibuf;
	if (ioctl(fd, SIOCGIFCONF, (char *) &ifc) == -1 ||
			ifc.ifc_len < sizeof(struct ifreq)) {
		memset(info->hw, 0, 6);
	} else {
		/* Search interface configuration list for link layer address. */
		ifrp = ibuf;
		ifend = (struct ifreq *) ((char *) ibuf + ifc.ifc_len);
		while (ifrp < ifend) {
			/* Look for interface */
			if (strcmp(ifname, ifrp->ifr_name) == 0 &&
					ifrp->ifr_addr.sa_family == AF_LINK &&
					((struct sockaddr_dl *) &ifrp->ifr_addr)->sdl_type == IFT_ETHER) {
				memcpy(info->hw, LLADDR((struct sockaddr_dl *) &ifrp->ifr_addr), 6);
				break;
			}
			/* Bump interface config pointer */
			r = ifrp->ifr_addr.sa_len + sizeof(ifrp->ifr_name);
			if (r < sizeof(*ifrp))
				r = sizeof(*ifrp);
			ifrp = (struct ifreq *) ((char *) ifrp + r);
		}
	}
#endif

	// Get IP address

	if(ioctl(fd, SIOCGIFADDR, &ifr) != -1) {
		sa = (struct sockaddr_in *)&(ifr.ifr_addr);
		info->ip = sa->sin_addr.s_addr;
	} else {
		info->ip = 0;
	}

	// Get netmask

	if(ioctl(fd, SIOCGIFNETMASK, &ifr) != -1) {
		sa = (struct sockaddr_in *)&(ifr.ifr_addr);
		info->nm = sa->sin_addr.s_addr;
	} else {
		info->nm = 0;
	}

	// Get default gateway if on this interface

	info->gw = 0;
#ifdef linux
	if(froute != NULL) {
		fseek(froute, 0, 0);

		while(fgets(buf, sizeof(buf), froute)) {
			r = sscanf(buf, "%s %x %x", a, &b, &c);

			if((strcmp(a, info->id) == 0) && (b == 0)) {
				info->gw = c;
			}
		}

	}
#elif defined(__OpenBSD__)
	{
	struct rt_msghdr *rtm = NULL;
	char *buf = NULL, *next, *lim = NULL;
	size_t needed;
	int mib[6];
	struct sockaddr *sa;
	struct sockaddr_in *sin;

	mib[0] = CTL_NET;
	mib[1] = PF_ROUTE;
	mib[2] = 0;
	mib[3] = AF_INET;
	mib[4] = NET_RT_DUMP;
	mib[5] = 0;
	if (sysctl(mib, 6, NULL, &needed, NULL, 0) == -1) {
		perror("route-sysctl-estimate");
		exit(1);
	}
	if (needed > 0) {
		if ((buf = malloc(needed)) == 0) {
			printf("out of space\n");
			exit(1);
		}
		if (sysctl(mib, 6, buf, &needed, NULL, 0) == -1) {
			perror("sysctl of routing table");
			exit(1);
		}
		lim  = buf + needed;
	}

	if (buf) {
		for (next = buf; next < lim; next += rtm->rtm_msglen) {
			rtm = (struct rt_msghdr *)next;
			sa = (struct sockaddr *)(rtm + 1);
			sin = (struct sockaddr_in *)sa;

			if (sin->sin_addr.s_addr == 0) {
				sa = (struct sockaddr *)(ROUNDUP(sa->sa_len) + (char *)sa);
				sin = (struct sockaddr_in *)sa;
				info->gw = sin->sin_addr.s_addr;
				break;
			}
		}
		free(buf);
	}
	}
#endif

	// Get wireless link status if wireless

	info->sl = 0;
#ifdef linux
	if(fwireless != NULL) {
		fseek(fwireless, 0, 0);

		while(fgets(buf, sizeof(buf), fwireless)) {
			r = sscanf(buf, "%s %d %d ", a, &b, &c);
			if(strchr(a, ':'))  *(strchr(a, ':')) = 0;
			if(strcmp(a, parent) == 0) {
				info->sl = c;
			}
		}
	}

#ifdef ENABLE_NWN_SUPPORT
	if (info->sl == 0) {
		info->sl = nwn_get_link(parent);
	}
#endif
#elif defined(__OpenBSD__)
	{
	struct wi_req	wreq;
	struct ifreq	ifr;

	wreq.wi_len = WI_MAX_DATALEN;
	wreq.wi_type = WI_RID_COMMS_QUALITY;

	strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
	ifr.ifr_data = (caddr_t)&wreq;

	if (ioctl(fd, SIOCGWAVELAN, &ifr) != -1)
		info->sl = letoh16(wreq.wi_val[0]);
	}
#endif

	// Get Total tx/rx bytes

#ifdef linux
	if(fdev != NULL) {
		fseek(fdev, 0, 0);

		while(fgets(buf, sizeof(buf), fdev)) {
			r = sscanf(buf, "%s %d %d %d %d %d %d %d %d %d", a, &b, &d,&d,&d,&d,&d,&d,&d, &c);
			if(strchr(a, ':'))  *(strchr(a, ':')) = 0;
			if(strcmp(a, parent) == 0) {
				info->bytes = b + c;
			}
		}
	}
#endif

	return(0);
}
Code example #8
status_t
M68KVMTranslationMap040::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("M68KVMTranslationMap040::Unmap: asked to free pages 0x%lx to 0x%lx\n", start, end);

	page_root_entry *pr = fPagingStructures->pgroot_virt;
	page_directory_entry *pd;
	page_table_entry *pt;
	int index;

	do {
		index = VADDR_TO_PRENT(start);
		if (PRE_TYPE(pr[index]) != DT_ROOT) {
			// no pagedir here, move the start up to access the next page
			// dir group
			start = ROUNDUP(start + 1, kPageDirAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pd = (page_directory_entry*)MapperGetPageTableAt(
			PRE_TO_PA(pr[index]));
		// we want the table at rindex, not at rindex%(tbl/page)
		//pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;


		index = VADDR_TO_PDENT(start);
		if (PDE_TYPE(pd[index]) != DT_DIR) {
			// no pagedir here, move the start up to access the next page
			// table group
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		pt = (page_table_entry*)MapperGetPageTableAt(
			PDE_TO_PA(pd[index]));
		// we want the table at rindex, not at rindex%(tbl/page)
		//pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

		for (index = VADDR_TO_PTENT(start);
				(index < NUM_PAGEENT_PER_TBL) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if (PTE_TYPE(pt[index]) != DT_PAGE
				&& PTE_TYPE(pt[index]) != DT_INDIRECT) {
				// page mapping not valid
				continue;
			}

			TRACE("::Unmap: removing page 0x%lx\n", start);

			page_table_entry oldEntry
				= M68KPagingMethod040::ClearPageTableEntry(&pt[index]);
			fMapCount--;

			if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
				// Note, that we only need to invalidate the address, if the
				// accessed flags was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}
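
One detail worth noting: when a page directory or table is missing, the code rounds up start + 1 rather than start. Rounding start itself would be a no-op whenever start already sits on a boundary, so the loop would never advance. Illustrative numbers (a 4 MiB group size chosen purely for the example):

// start already on a 4MiB boundary:
//   ROUNDUP(0x00400000,     0x400000) == 0x00400000   -> loop never advances
//   ROUNDUP(0x00400000 + 1, 0x400000) == 0x00800000   -> next group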
Code example #9
File: DbOledb.cpp Project: okigan/dblib
BOOL COledbRecordset::_BindColumns()
{
   _ASSERTE(m_rgBindings==NULL);
   
   if( !IsOpen() ) return FALSE;

   HRESULT Hr;
   m_nCols = 0;
   m_pwstrNameBuffer = NULL;

   CComQIPtr<IColumnsInfo> spColInfo = m_spRowset;
   if( spColInfo == NULL ) return FALSE;
   DBCOLUMNINFO* rgColumnInfo = NULL;
   ULONG nCols = 0;
   Hr = spColInfo->GetColumnInfo(&nCols, &rgColumnInfo, &m_pwstrNameBuffer);
   if( FAILED(Hr) ) return _Error(Hr);

   // Allocate memory for the bindings array; there is a one-to-one
   // mapping between the columns returned from GetColumnInfo() and our
   // bindings.
   long cbAlloc = nCols * sizeof(DBBINDING);
   m_rgBindings = (DBBINDING*) ::CoTaskMemAlloc(cbAlloc);
   if( m_rgBindings == NULL ) return FALSE;
   ::ZeroMemory(m_rgBindings, cbAlloc);
   m_iAdjustIndex = 0;

   // Construct the binding array element for each column.
   ULONG dwOffset = 0;
   for( ULONG iCol = 0; iCol < nCols; iCol++ ) {
      DBBINDING& b = m_rgBindings[iCol];
      b.iOrdinal = rgColumnInfo[iCol].iOrdinal;
      b.dwPart = DBPART_VALUE | DBPART_LENGTH | DBPART_STATUS;
      b.obStatus = dwOffset;
      b.obLength = dwOffset + sizeof(DBSTATUS);
      b.obValue = dwOffset + sizeof(DBSTATUS) + sizeof(ULONG);    
      b.dwMemOwner = DBMEMOWNER_CLIENTOWNED;
      b.eParamIO = DBPARAMIO_NOTPARAM;
      b.bPrecision = rgColumnInfo[iCol].bPrecision;
      b.bScale = rgColumnInfo[iCol].bScale;

      // Ignore bookmark column
      if( (rgColumnInfo[iCol].dwFlags & DBCOLUMNFLAGS_ISBOOKMARK) != 0 ) m_iAdjustIndex++;

      WORD wType = rgColumnInfo[iCol].wType;
      switch( wType ) {
      case DBTYPE_CY:
      case DBTYPE_DECIMAL:
      case DBTYPE_NUMERIC:
         b.wType = DBTYPE_STR;
         b.cbMaxLen = 50; // Allow 50 characters for conversion
         break;
      case DBTYPE_STR:
      case DBTYPE_WSTR:
#ifdef _UNICODE
         b.wType = DBTYPE_WSTR;
#else
         b.wType = DBTYPE_STR;
#endif
         b.cbMaxLen = max(min((rgColumnInfo[iCol].ulColumnSize + 1UL) * sizeof(TCHAR), 1024UL), 0UL);
         break;
      default:
         b.wType = wType;
         b.cbMaxLen = max(min(rgColumnInfo[iCol].ulColumnSize, 1024UL), 0UL);
      }

// ROUNDUP: on all platforms, pointers must be aligned properly
#define ROUNDUP_AMOUNT 8
#define ROUNDUP_(size,amount) (((ULONG)(size)+((amount)-1))&~((amount)-1))
#define ROUNDUP(size)         ROUNDUP_(size, ROUNDUP_AMOUNT)
    
      // Update the offset past the end of this column's data
      dwOffset = b.cbMaxLen + b.obValue;
      dwOffset = ROUNDUP(dwOffset);
   }

   m_nCols = (short) nCols;
   m_dwBufferSize = dwOffset;

   ::CoTaskMemFree(rgColumnInfo);

   // Create accessor
   CComQIPtr<IAccessor> spAccessor = m_spRowset;
   if( spAccessor == NULL ) return FALSE;
   Hr = spAccessor->CreateAccessor(DBACCESSOR_ROWDATA, m_nCols, m_rgBindings, 0, &m_hAccessor, NULL);
   if( FAILED(Hr) ) return _Error(Hr);

   m_pData = ::CoTaskMemAlloc(m_dwBufferSize);
   if( m_pData == NULL ) return FALSE;

   return TRUE;
}
Code example #10
File: heap.c Project: sndnvaps/HTC-leo-cLK
void *heap_alloc(size_t size, unsigned int alignment)
{
	void *ptr;
#if DEBUG_HEAP
	size_t original_size = size;
#endif
	LTRACEF("size %zd, align %d\n", size, alignment);

	// deal with the pending free list
	if (unlikely(!list_is_empty(&theheap.delayed_free_list))) {
		heap_free_delayed_list();
	}

	// alignment must be power of 2
	if (alignment & (alignment - 1))
		return NULL;

	// we always put a size field + base pointer + magic in front of the allocation
	size += sizeof(struct alloc_struct_begin);
#if DEBUG_HEAP
	size += PADDING_SIZE;
#endif
	
	// make sure we allocate at least the size of a struct free_heap_chunk so that
	// when we free it, we can create a struct free_heap_chunk struct and stick it
	// in the spot
	if (size < sizeof(struct free_heap_chunk))
		size = sizeof(struct free_heap_chunk);

	// round up size to a multiple of native pointer size
	size = ROUNDUP(size, sizeof(void *));

	// deal with nonzero alignments
	if (alignment > 0) {
		if (alignment < 16)
			alignment = 16;

		// add alignment for worst case fit
		size += alignment;
	}

	mutex_acquire(&theheap.lock);

	// walk through the list
	ptr = NULL;
	struct free_heap_chunk *chunk;
	list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) {
		DEBUG_ASSERT((chunk->len % sizeof(void *)) == 0); // len should always be a multiple of pointer size

		// is it big enough to service our allocation?
		if (chunk->len >= size) {
			ptr = chunk;

			// remove it from the list
			struct list_node *next_node = list_next(&theheap.free_list, &chunk->node);
			list_delete(&chunk->node);

			if (chunk->len > size + sizeof(struct free_heap_chunk)) {
				// there's enough space in this chunk to create a new one after the allocation
				struct free_heap_chunk *newchunk = heap_create_free_chunk((uint8_t *)ptr + size, chunk->len - size);

				// truncate this chunk
				chunk->len -= chunk->len - size;

				// add the new one where chunk used to be
				if (next_node)
					list_add_before(next_node, &newchunk->node);
				else
					list_add_tail(&theheap.free_list, &newchunk->node);
			}

			// the allocated size is actually the length of this chunk, not the size requested
			DEBUG_ASSERT(chunk->len >= size);
			size = chunk->len;
			
#if DEBUG_HEAP
			memset(ptr, ALLOC_FILL, size);
#endif

			ptr = (void *)((addr_t)ptr + sizeof(struct alloc_struct_begin));

			// align the output if requested
			if (alignment > 0) {
				ptr = (void *)ROUNDUP((addr_t)ptr, alignment);
			}

			struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr;
			as--;
			as->magic = HEAP_MAGIC;
			as->ptr = (void *)chunk;
			as->size = size;
#if DEBUG_HEAP
			as->padding_start = ((uint8_t *)ptr + original_size);
			as->padding_size = (((addr_t)chunk + size) - ((addr_t)ptr + original_size));
			//printf("padding start %p, size %u, chunk %p, size %u\n", as->padding_start, as->padding_size, chunk, size);

			memset(as->padding_start, PADDING_FILL, as->padding_size);
#endif

			break;
		}
	}

	mutex_release(&theheap.lock);

	LTRACEF("returning ptr %p\n", ptr);

	return ptr;
}
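
The alloc_struct_begin record placed just below the returned pointer is what makes freeing aligned allocations possible: as->ptr still remembers the original chunk start even after the user pointer was rounded up past it. A matching free, sketched under the same assumptions (the real heap_free is not shown here, and heap_insert_free_chunk is an assumed helper):

void heap_free(void *ptr)
{
	if (ptr == NULL)
		return;

	// recover the bookkeeping record stored just below the returned pointer
	struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr;
	as--;

	DEBUG_ASSERT(as->magic == HEAP_MAGIC);

	mutex_acquire(&theheap.lock);
	// as->ptr is the original chunk start (before the alignment round-up)
	// and as->size its full length, so the whole region can be rebuilt
	// into a free chunk
	heap_insert_free_chunk(heap_create_free_chunk(as->ptr, as->size));
	mutex_release(&theheap.lock);
}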
Code example #11
void write_sparse_image(
		struct sparse_storage *info, const char *part_name,
		void *data, unsigned sz)
{
	lbaint_t blk;
	lbaint_t blkcnt;
	lbaint_t blks;
	uint32_t bytes_written = 0;
	unsigned int chunk;
	unsigned int offset;
	unsigned int chunk_data_sz;
	uint32_t *fill_buf = NULL;
	uint32_t fill_val;
	sparse_header_t *sparse_header;
	chunk_header_t *chunk_header;
	uint32_t total_blocks = 0;
	int fill_buf_num_blks;
	int i;
	int j;

	fill_buf_num_blks = CONFIG_FASTBOOT_FLASH_FILLBUF_SIZE / info->blksz;

	/* Read and skip over sparse image header */
	sparse_header = (sparse_header_t *)data;

	data += sparse_header->file_hdr_sz;
	if (sparse_header->file_hdr_sz > sizeof(sparse_header_t)) {
		/*
		 * Skip the remaining bytes in a header that is longer than
		 * we expected.
		 */
		data += (sparse_header->file_hdr_sz - sizeof(sparse_header_t));
	}

	debug("=== Sparse Image Header ===\n");
	debug("magic: 0x%x\n", sparse_header->magic);
	debug("major_version: 0x%x\n", sparse_header->major_version);
	debug("minor_version: 0x%x\n", sparse_header->minor_version);
	debug("file_hdr_sz: %d\n", sparse_header->file_hdr_sz);
	debug("chunk_hdr_sz: %d\n", sparse_header->chunk_hdr_sz);
	debug("blk_sz: %d\n", sparse_header->blk_sz);
	debug("total_blks: %d\n", sparse_header->total_blks);
	debug("total_chunks: %d\n", sparse_header->total_chunks);

	/*
	 * Verify that the sparse block size is a multiple of our
	 * storage backend block size
	 */
	div_u64_rem(sparse_header->blk_sz, info->blksz, &offset);
	if (offset) {
		printf("%s: Sparse image block size issue [%u]\n",
		       __func__, sparse_header->blk_sz);
		fastboot_fail("sparse image block size issue");
		return;
	}

	puts("Flashing Sparse Image\n");

	/* Start processing chunks */
	blk = info->start;
	for (chunk = 0; chunk < sparse_header->total_chunks; chunk++) {
		/* Read and skip over chunk header */
		chunk_header = (chunk_header_t *)data;
		data += sizeof(chunk_header_t);

		if (chunk_header->chunk_type != CHUNK_TYPE_RAW) {
			debug("=== Chunk Header ===\n");
			debug("chunk_type: 0x%x\n", chunk_header->chunk_type);
			debug("chunk_data_sz: 0x%x\n", chunk_header->chunk_sz);
			debug("total_size: 0x%x\n", chunk_header->total_sz);
		}

		if (sparse_header->chunk_hdr_sz > sizeof(chunk_header_t)) {
			/*
			 * Skip the remaining bytes in a header that is longer
			 * than we expected.
			 */
			data += (sparse_header->chunk_hdr_sz -
				 sizeof(chunk_header_t));
		}

		chunk_data_sz = sparse_header->blk_sz * chunk_header->chunk_sz;
		blkcnt = chunk_data_sz / info->blksz;
		switch (chunk_header->chunk_type) {
		case CHUNK_TYPE_RAW:
			if (chunk_header->total_sz !=
			    (sparse_header->chunk_hdr_sz + chunk_data_sz)) {
				fastboot_fail(
					"Bogus chunk size for chunk type Raw");
				return;
			}

			if (blk + blkcnt > info->start + info->size) {
				printf(
				    "%s: Request would exceed partition size!\n",
				    __func__);
				fastboot_fail(
				    "Request would exceed partition size!");
				return;
			}

			blks = info->write(info, blk, blkcnt, data);
			/* blks might be > blkcnt (eg. NAND bad-blocks) */
			if (blks < blkcnt) {
				printf("%s: %s" LBAFU " [" LBAFU "]\n",
				       __func__, "Write failed, block #",
				       blk, blks);
				fastboot_fail(
					      "flash write failure");
				return;
			}
			blk += blks;
			bytes_written += blkcnt * info->blksz;
			total_blocks += chunk_header->chunk_sz;
			data += chunk_data_sz;
			break;

		case CHUNK_TYPE_FILL:
			if (chunk_header->total_sz !=
			    (sparse_header->chunk_hdr_sz + sizeof(uint32_t))) {
				fastboot_fail(
					"Bogus chunk size for chunk type FILL");
				return;
			}

			fill_buf = (uint32_t *)
				   memalign(ARCH_DMA_MINALIGN,
					    ROUNDUP(
						info->blksz * fill_buf_num_blks,
						ARCH_DMA_MINALIGN));
			if (!fill_buf) {
				fastboot_fail(
					"Malloc failed for: CHUNK_TYPE_FILL");
				return;
			}

			fill_val = *(uint32_t *)data;
			data = (char *)data + sizeof(uint32_t);

			for (i = 0;
			     i < (info->blksz * fill_buf_num_blks /
				  sizeof(fill_val));
			     i++)
				fill_buf[i] = fill_val;

			if (blk + blkcnt > info->start + info->size) {
				printf(
				    "%s: Request would exceed partition size!\n",
				    __func__);
				fastboot_fail(
				    "Request would exceed partition size!");
				return;
			}

			for (i = 0; i < blkcnt;) {
				j = blkcnt - i;
				if (j > fill_buf_num_blks)
					j = fill_buf_num_blks;
				blks = info->write(info, blk, j, fill_buf);
				/* blks might be > j (eg. NAND bad-blocks) */
				if (blks < j) {
					printf("%s: %s " LBAFU " [%d]\n",
					       __func__,
					       "Write failed, block #",
					       blk, j);
					fastboot_fail(
						      "flash write failure");
					free(fill_buf);
					return;
				}
				blk += blks;
				i += j;
			}
			bytes_written += blkcnt * info->blksz;
			total_blocks += chunk_data_sz / sparse_header->blk_sz;
			free(fill_buf);
			break;

		case CHUNK_TYPE_DONT_CARE:
			blk += info->reserve(info, blk, blkcnt);
			total_blocks += chunk_header->chunk_sz;
			break;

		case CHUNK_TYPE_CRC32:
			if (chunk_header->total_sz !=
			    sparse_header->chunk_hdr_sz) {
				fastboot_fail(
					"Bogus chunk size for chunk type Dont Care");
				return;
			}
			total_blocks += chunk_header->chunk_sz;
			data += chunk_data_sz;
			break;

		default:
			printf("%s: Unknown chunk type: %x\n", __func__,
			       chunk_header->chunk_type);
			fastboot_fail("Unknown chunk type");
			return;
		}
	}

	debug("Wrote %d blocks, expected to write %d blocks\n",
	      total_blocks, sparse_header->total_blks);
	printf("........ wrote %u bytes to '%s'\n", bytes_written, part_name);

	if (total_blocks != sparse_header->total_blks)
		fastboot_fail("sparse image write failure");
	else
		fastboot_okay("");

	return;
}
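
For reference, the headers this function walks belong to the Android sparse image format. A sketch of the structures as commonly defined (field names match the accesses above; the constants follow the AOSP libsparse headers, so double-check them against your tree):

typedef struct sparse_header {
	uint32_t magic;          /* 0xed26ff3a */
	uint16_t major_version;  /* 1 */
	uint16_t minor_version;  /* 0 */
	uint16_t file_hdr_sz;    /* 28 bytes for this version */
	uint16_t chunk_hdr_sz;   /* 12 bytes for this version */
	uint32_t blk_sz;         /* block size in bytes, a multiple of 4096 */
	uint32_t total_blks;     /* blocks in the expanded output image */
	uint32_t total_chunks;   /* chunks in the input file */
	uint32_t image_checksum; /* CRC32 of the original data */
} sparse_header_t;

typedef struct chunk_header {
	uint16_t chunk_type;     /* 0xcac1 RAW, 0xcac2 FILL,
	                            0xcac3 DONT_CARE, 0xcac4 CRC32 */
	uint16_t reserved1;
	uint32_t chunk_sz;       /* chunk size in blocks */
	uint32_t total_sz;       /* header + data, in bytes */
} chunk_header_t;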
Code example #12
File: nvram_linux.c Project: mirror/dd-wrt
int
nvram_commit(void)
{
	char *buf;
	size_t erasesize, len, magic_len;
	unsigned int i;
	int ret;
	struct nvram_header *header;
	unsigned long flags;
	u_int32_t offset;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	struct erase_info erase;
	u_int32_t magic_offset = 0; /* Offset for writing MAGIC # */

	if (!nvram_mtd) {
		printk("nvram_commit: NVRAM not found\n");
		return -ENODEV;
	}

	if (in_interrupt()) {
		printk("nvram_commit: not committing in interrupt\n");
		return -EINVAL;
	}

	/* Backup sector blocks to be erased */
	erasesize = ROUNDUP(NVRAM_SPACE, nvram_mtd->erasesize);
	if (!(buf = kmalloc(erasesize, GFP_KERNEL))) {
		printk("nvram_commit: out of memory\n");
		return -ENOMEM;
	}

	down(&nvram_sem);

	if ((i = erasesize - NVRAM_SPACE) > 0) {
		offset = nvram_mtd->size - erasesize;
		len = 0;
		ret = MTD_READ(nvram_mtd, offset, i, &len, buf);
		if (ret || len != i) {
			printk("nvram_commit: read error ret = %d, len = %d/%d\n", ret, len, i);
			ret = -EIO;
			goto done;
		}
		header = (struct nvram_header *)(buf + i);
		magic_offset = i + ((void *)&header->magic - (void *)header);
	} else {
		offset = nvram_mtd->size - NVRAM_SPACE;
		header = (struct nvram_header *)buf;
		magic_offset = ((void *)&header->magic - (void *)header);
	}

	/* Clear the existing magic # to mark the NVRAM as unusable;
	   we can pull MAGIC bits low without an erase. */
	header->magic = NVRAM_CLEAR_MAGIC; /* All zeros magic */

	/* Unlock sector blocks (for Intel 28F320C3B flash) , 20060309 */
	if(nvram_mtd->unlock)
		nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);

	ret = MTD_WRITE(nvram_mtd, offset + magic_offset, sizeof(header->magic), 
									&magic_len, (char *)&header->magic);
	if (ret || magic_len != sizeof(header->magic)) {
		printk("nvram_commit: clear MAGIC error\n");
		ret = -EIO;
		goto done;
	}

	header->magic = NVRAM_MAGIC; /* reset MAGIC before we regenerate the NVRAM,
	                                otherwise we'll have an incorrect CRC */
	/* Regenerate NVRAM */
	spin_lock_irqsave(&nvram_lock, flags);
	ret = _nvram_commit(header);
	spin_unlock_irqrestore(&nvram_lock, flags);
	if (ret)
		goto done;

	/* Erase sector blocks */
	init_waitqueue_head(&wait_q);
	for (; offset < nvram_mtd->size - NVRAM_SPACE + header->len; offset += nvram_mtd->erasesize) {
		erase.mtd = nvram_mtd;
		erase.addr = offset;
		erase.len = nvram_mtd->erasesize;
		erase.callback = erase_callback;
		erase.priv = (u_long) &wait_q;

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&wait_q, &wait);

		/* Unlock sector blocks */
		if (nvram_mtd->unlock)
			nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);

		if ((ret = MTD_ERASE(nvram_mtd, &erase))) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&wait_q, &wait);
			printk("nvram_commit: erase error\n");
			goto done;
		}

		/* Wait for erase to finish */
		schedule();
		remove_wait_queue(&wait_q, &wait);
	}

	/* Write partition up to end of data area */
	header->magic = NVRAM_INVALID_MAGIC; /* All ones magic */
	offset = nvram_mtd->size - erasesize;
	i = erasesize - NVRAM_SPACE + header->len;
	ret = MTD_WRITE(nvram_mtd, offset, i, &len, buf);
	if (ret || len != i) {
		printk("nvram_commit: write error\n");
		ret = -EIO;
		goto done;
	}

	/* Now mark the NVRAM in flash as "valid" by setting the correct
		 MAGIC # */
	header->magic = NVRAM_MAGIC;
	ret = MTD_WRITE(nvram_mtd, offset + magic_offset, sizeof(header->magic), 
									&magic_len, (char *)&header->magic);
	if (ret || magic_len != sizeof(header->magic)) {
		printk("nvram_commit: write MAGIC error\n");
		ret = -EIO;
		goto done;
	}

	/*
	 * Reading a few bytes back here will put the device
	 * back to the correct mode on certain flashes */
	offset = nvram_mtd->size - erasesize;
	ret = MTD_READ(nvram_mtd, offset, 4, &len, buf);

 done:
	up(&nvram_sem);
	kfree(buf);

	return ret;
}
Code example #13
File: proc.c Project: xuchiheng/ucore_plus
static int load_icode(int fd, int argc, char **kargv, int envc, char **kenvp)
{
	assert(argc >= 0 && argc <= EXEC_MAX_ARG_NUM);
	assert(envc >= 0 && envc <= EXEC_MAX_ENV_NUM);
	if (current->mm != NULL) {
		panic("load_icode: current->mm must be empty.\n");
	}

	int ret = -E_NO_MEM;

//#ifdef UCONFIG_BIONIC_LIBC
	uint32_t real_entry;
//#endif //UCONFIG_BIONIC_LIBC

	struct mm_struct *mm;
	if ((mm = mm_create()) == NULL) {
		goto bad_mm;
	}

	if (setup_pgdir(mm) != 0) {
		goto bad_pgdir_cleanup_mm;
	}

	mm->brk_start = 0;

	struct Page *page;

	struct elfhdr __elf, *elf = &__elf;
	if ((ret = load_icode_read(fd, elf, sizeof(struct elfhdr), 0)) != 0) {
		goto bad_elf_cleanup_pgdir;
	}

	if (elf->e_magic != ELF_MAGIC) {
		ret = -E_INVAL_ELF;
		goto bad_elf_cleanup_pgdir;
	}
//#ifdef UCONFIG_BIONIC_LIBC
	real_entry = elf->e_entry;

	uint32_t load_address, load_address_flag = 0;
//#endif //UCONFIG_BIONIC_LIBC

	struct proghdr __ph, *ph = &__ph;
	uint32_t vm_flags, phnum;
	pte_perm_t perm = 0;

//#ifdef UCONFIG_BIONIC_LIBC
	uint32_t is_dynamic = 0, interp_idx;
	uint32_t bias = 0;
//#endif //UCONFIG_BIONIC_LIBC
	for (phnum = 0; phnum < elf->e_phnum; phnum++) {
		off_t phoff = elf->e_phoff + sizeof(struct proghdr) * phnum;
		if ((ret =
		     load_icode_read(fd, ph, sizeof(struct proghdr),
				     phoff)) != 0) {
			goto bad_cleanup_mmap;
		}

		if (ph->p_type == ELF_PT_INTERP) {
			is_dynamic = 1;
			interp_idx = phnum;
			continue;
		}

		if (ph->p_type != ELF_PT_LOAD) {
			continue;
		}
		if (ph->p_filesz > ph->p_memsz) {
			ret = -E_INVAL_ELF;
			goto bad_cleanup_mmap;
		}

		if (ph->p_va == 0 && !bias) {
			bias = 0x00008000;
		}

		if ((ret = map_ph(fd, ph, mm, &bias, 0)) != 0) {
			kprintf("load address: 0x%08x size: %d\n", ph->p_va,
				ph->p_memsz);
			goto bad_cleanup_mmap;
		}

		if (load_address_flag == 0)
			load_address = ph->p_va + bias;
		++load_address_flag;

	  /*********************************************/
		/*
		   vm_flags = 0;
		   ptep_set_u_read(&perm);
		   if (ph->p_flags & ELF_PF_X) vm_flags |= VM_EXEC;
		   if (ph->p_flags & ELF_PF_W) vm_flags |= VM_WRITE;
		   if (ph->p_flags & ELF_PF_R) vm_flags |= VM_READ;
		   if (vm_flags & VM_WRITE) ptep_set_u_write(&perm);

		   if ((ret = mm_map(mm, ph->p_va, ph->p_memsz, vm_flags, NULL)) != 0) {
		   goto bad_cleanup_mmap;
		   }

		   if (mm->brk_start < ph->p_va + ph->p_memsz) {
		   mm->brk_start = ph->p_va + ph->p_memsz;
		   }

		   off_t offset = ph->p_offset;
		   size_t off, size;
		   uintptr_t start = ph->p_va, end, la = ROUNDDOWN(start, PGSIZE);

		   end = ph->p_va + ph->p_filesz;
		   while (start < end) {
		   if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
		   ret = -E_NO_MEM;
		   goto bad_cleanup_mmap;
		   }
		   off = start - la, size = PGSIZE - off, la += PGSIZE;
		   if (end < la) {
		   size -= la - end;
		   }
		   if ((ret = load_icode_read(fd, page2kva(page) + off, size, offset)) != 0) {
		   goto bad_cleanup_mmap;
		   }
		   start += size, offset += size;
		   }

		   end = ph->p_va + ph->p_memsz;

		   if (start < la) {
		   // ph->p_memsz == ph->p_filesz 
		   if (start == end) {
		   continue ;
		   }
		   off = start + PGSIZE - la, size = PGSIZE - off;
		   if (end < la) {
		   size -= la - end;
		   }
		   memset(page2kva(page) + off, 0, size);
		   start += size;
		   assert((end < la && start == end) || (end >= la && start == la));
		   }

		   while (start < end) {
		   if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
		   ret = -E_NO_MEM;
		   goto bad_cleanup_mmap;
		   }
		   off = start - la, size = PGSIZE - off, la += PGSIZE;
		   if (end < la) {
		   size -= la - end;
		   }
		   memset(page2kva(page) + off, 0, size);
		   start += size;
		   }
		 */
	  /**************************************/
	}

	mm->brk_start = mm->brk = ROUNDUP(mm->brk_start, PGSIZE);

	/* setup user stack */
	vm_flags = VM_READ | VM_WRITE | VM_STACK;
	if ((ret =
	     mm_map(mm, USTACKTOP - USTACKSIZE, USTACKSIZE, vm_flags,
		    NULL)) != 0) {
		goto bad_cleanup_mmap;
	}

	if (is_dynamic) {
		elf->e_entry += bias;

		bias = 0;

		off_t phoff =
		    elf->e_phoff + sizeof(struct proghdr) * interp_idx;
		if ((ret =
		     load_icode_read(fd, ph, sizeof(struct proghdr),
				     phoff)) != 0) {
			goto bad_cleanup_mmap;
		}

		char *interp_path = (char *)kmalloc(ph->p_filesz);
		load_icode_read(fd, interp_path, ph->p_filesz, ph->p_offset);

		int interp_fd = sysfile_open(interp_path, O_RDONLY);
		assert(interp_fd >= 0);
		struct elfhdr interp___elf, *interp_elf = &interp___elf;
		assert((ret =
			load_icode_read(interp_fd, interp_elf,
					sizeof(struct elfhdr), 0)) == 0);
		assert(interp_elf->e_magic == ELF_MAGIC);

		struct proghdr interp___ph, *interp_ph = &interp___ph;
		uint32_t interp_phnum;
		uint32_t va_min = 0xffffffff, va_max = 0;
		for (interp_phnum = 0; interp_phnum < interp_elf->e_phnum;
		     ++interp_phnum) {
			off_t interp_phoff =
			    interp_elf->e_phoff +
			    sizeof(struct proghdr) * interp_phnum;
			assert((ret =
				load_icode_read(interp_fd, interp_ph,
						sizeof(struct proghdr),
						interp_phoff)) == 0);
			if (interp_ph->p_type != ELF_PT_LOAD) {
				continue;
			}
			if (va_min > interp_ph->p_va)
				va_min = interp_ph->p_va;
			if (va_max < interp_ph->p_va + interp_ph->p_memsz)
				va_max = interp_ph->p_va + interp_ph->p_memsz;
		}

		bias = get_unmapped_area(mm, va_max - va_min + 1 + PGSIZE);
		bias = ROUNDUP(bias, PGSIZE);

		for (interp_phnum = 0; interp_phnum < interp_elf->e_phnum;
		     ++interp_phnum) {
			off_t interp_phoff =
			    interp_elf->e_phoff +
			    sizeof(struct proghdr) * interp_phnum;
			assert((ret =
				load_icode_read(interp_fd, interp_ph,
						sizeof(struct proghdr),
						interp_phoff)) == 0);
			if (interp_ph->p_type != ELF_PT_LOAD) {
				continue;
			}
			assert((ret =
				map_ph(interp_fd, interp_ph, mm, &bias,
				       1)) == 0);
		}

		real_entry = interp_elf->e_entry + bias;

		sysfile_close(interp_fd);
		kfree(interp_path);
	}

	sysfile_close(fd);

	bool intr_flag;
	local_intr_save(intr_flag);
	{
		list_add(&(proc_mm_list), &(mm->proc_mm_link));
	}
	local_intr_restore(intr_flag);
	mm_count_inc(mm);
	current->mm = mm;
	set_pgdir(current, mm->pgdir);
	mm->cpuid = myid();
	mp_set_mm_pagetable(mm);

	if (!is_dynamic) {
		real_entry += bias;
	}
#ifdef UCONFIG_BIONIC_LIBC
	if (init_new_context_dynamic(current, elf, argc, kargv, envc, kenvp,
				     is_dynamic, real_entry, load_address,
				     bias) < 0)
		goto bad_cleanup_mmap;
#else
	if (init_new_context(current, elf, argc, kargv, envc, kenvp) < 0)
		goto bad_cleanup_mmap;
#endif //UCONFIG_BIONIC_LIBC
	ret = 0;
out:
	return ret;
bad_cleanup_mmap:
	exit_mmap(mm);
bad_elf_cleanup_pgdir:
	put_pgdir(mm);
bad_pgdir_cleanup_mm:
	mm_destroy(mm);
bad_mm:
	goto out;
}
Code example #14
File: proc.c Project: xuchiheng/ucore_plus
//#ifdef UCONFIG_BIONIC_LIBC
static int
map_ph(int fd, struct proghdr *ph, struct mm_struct *mm, uint32_t * pbias,
       uint32_t linker)
{
	int ret = 0;
	struct Page *page;
	uint32_t vm_flags = 0;
	uint32_t bias = 0;
	pte_perm_t perm = 0;
	ptep_set_u_read(&perm);

	if (ph->p_flags & ELF_PF_X)
		vm_flags |= VM_EXEC;
	if (ph->p_flags & ELF_PF_W)
		vm_flags |= VM_WRITE;
	if (ph->p_flags & ELF_PF_R)
		vm_flags |= VM_READ;

	if (vm_flags & VM_WRITE)
		ptep_set_u_write(&perm);

	if (pbias) {
		bias = *pbias;
	}
	if (!bias && !ph->p_va) {
		bias = get_unmapped_area(mm, ph->p_memsz + PGSIZE);
		bias = ROUNDUP(bias, PGSIZE);
		if (pbias)
			*pbias = bias;
	}

	if ((ret =
	     mm_map(mm, ph->p_va + bias, ph->p_memsz, vm_flags, NULL)) != 0) {
		goto bad_cleanup_mmap;
	}

	if (!linker && mm->brk_start < ph->p_va + bias + ph->p_memsz) {
		mm->brk_start = ph->p_va + bias + ph->p_memsz;
	}

	off_t offset = ph->p_offset;
	size_t off, size;
	uintptr_t start = ph->p_va + bias, end, la = ROUNDDOWN(start, PGSIZE);

	end = ph->p_va + bias + ph->p_filesz;
	while (start < end) {
		if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
			ret = -E_NO_MEM;
			goto bad_cleanup_mmap;
		}
		off = start - la, size = PGSIZE - off, la += PGSIZE;
		if (end < la) {
			size -= la - end;
		}
		if ((ret =
		     load_icode_read(fd, page2kva(page) + off, size,
				     offset)) != 0) {
			goto bad_cleanup_mmap;
		}
		start += size, offset += size;
	}

	end = ph->p_va + bias + ph->p_memsz;

	if (start < la) {
		if (start == end) {
			goto normal_exit;
		}
		off = start + PGSIZE - la, size = PGSIZE - off;
		if (end < la) {
			size -= la - end;
		}
		memset(page2kva(page) + off, 0, size);
		start += size;
		assert((end < la && start == end)
		       || (end >= la && start == la));
	}

	while (start < end) {
		if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
			ret = -E_NO_MEM;
			goto bad_cleanup_mmap;
		}
		off = start - la, size = PGSIZE - off, la += PGSIZE;
		if (end < la) {
			size -= la - end;
		}
		memset(page2kva(page) + off, 0, size);
		start += size;
	}
normal_exit:
	return 0;
bad_cleanup_mmap:
	return ret;
}
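
The off/size arithmetic in map_ph is easiest to follow with concrete numbers. A hypothetical segment (values invented for illustration) with p_va + bias = 0x08048100 and PGSIZE = 4096:

/* start = 0x08048100, la = ROUNDDOWN(start, PGSIZE) = 0x08048000
 * first iteration:   off  = start - la   = 0x100
 *                    size = PGSIZE - off = 0xf00   (partial leading page)
 *                    la  += PGSIZE      -> 0x08049000
 * middle iterations: off = 0, size = PGSIZE        (whole pages)
 * last iteration:    if end < la, size -= la - end (partial trailing page)
 */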
Code example #15
File: pfkey.c Project: Azure/sonic-bcm-lkm
static int
pfkey_send(int sd, uint8_t satype, uint8_t mtype, uint8_t dir,
    int af, union ldpd_addr *src, union ldpd_addr *dst, uint32_t spi,
    uint8_t aalg, int alen, char *akey, uint8_t ealg, int elen, char *ekey,
    uint16_t sport, uint16_t dport)
{
	struct sadb_msg		smsg;
	struct sadb_sa		sa;
	struct sadb_address	sa_src, sa_dst;
	struct sadb_key		sa_akey, sa_ekey;
	struct sadb_spirange	sa_spirange;
	struct iovec		iov[IOV_CNT];
	ssize_t			n;
	int			len = 0;
	int			iov_cnt;
	struct sockaddr_storage	smask, dmask;
	union sockunion		su_src, su_dst;

	if (!pid)
		pid = getpid();

	/* we need clean sockaddr... no ports set */
	memset(&smask, 0, sizeof(smask));

	addr2sa(af, src, 0, &su_src);

	switch (af) {
	case AF_INET:
		memset(&((struct sockaddr_in *)&smask)->sin_addr, 0xff, 32/8);
		break;
	case AF_INET6:
		memset(&((struct sockaddr_in6 *)&smask)->sin6_addr, 0xff,
		    128/8);
		break;
	default:
		return (-1);
	}
	smask.ss_family = su_src.sa.sa_family;
	smask.ss_len = sockaddr_len(&su_src.sa);

	memset(&dmask, 0, sizeof(dmask));

	addr2sa(af, dst, 0, &su_dst);

	switch (af) {
	case AF_INET:
		memset(&((struct sockaddr_in *)&dmask)->sin_addr, 0xff, 32/8);
		break;
	case AF_INET6:
		memset(&((struct sockaddr_in6 *)&dmask)->sin6_addr, 0xff,
		    128/8);
		break;
	default:
		return (-1);
	}
	dmask.ss_family = su_dst.sa.sa_family;
	dmask.ss_len = sockaddr_len(&su_dst.sa);

	memset(&smsg, 0, sizeof(smsg));
	smsg.sadb_msg_version = PF_KEY_V2;
	smsg.sadb_msg_seq = ++sadb_msg_seq;
	smsg.sadb_msg_pid = pid;
	smsg.sadb_msg_len = sizeof(smsg) / 8;
	smsg.sadb_msg_type = mtype;
	smsg.sadb_msg_satype = satype;

	switch (mtype) {
	case SADB_GETSPI:
		memset(&sa_spirange, 0, sizeof(sa_spirange));
		sa_spirange.sadb_spirange_exttype = SADB_EXT_SPIRANGE;
		sa_spirange.sadb_spirange_len = sizeof(sa_spirange) / 8;
		sa_spirange.sadb_spirange_min = 0x100;
		sa_spirange.sadb_spirange_max = 0xffffffff;
		sa_spirange.sadb_spirange_reserved = 0;
		break;
	case SADB_ADD:
	case SADB_UPDATE:
	case SADB_DELETE:
		memset(&sa, 0, sizeof(sa));
		sa.sadb_sa_exttype = SADB_EXT_SA;
		sa.sadb_sa_len = sizeof(sa) / 8;
		sa.sadb_sa_replay = 0;
		sa.sadb_sa_spi = htonl(spi);
		sa.sadb_sa_state = SADB_SASTATE_MATURE;
		break;
	}

	memset(&sa_src, 0, sizeof(sa_src));
	sa_src.sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
	sa_src.sadb_address_len =
		(sizeof(sa_src) + ROUNDUP(sockaddr_len(&su_src.sa))) / 8;

	memset(&sa_dst, 0, sizeof(sa_dst));
	sa_dst.sadb_address_exttype = SADB_EXT_ADDRESS_DST;
	sa_dst.sadb_address_len =
		(sizeof(sa_dst) + ROUNDUP(sockaddr_len(&su_dst.sa))) / 8;

	sa.sadb_sa_auth = aalg;
	sa.sadb_sa_encrypt = SADB_X_EALG_AES; /* XXX */

	switch (mtype) {
	case SADB_ADD:
	case SADB_UPDATE:
		memset(&sa_akey, 0, sizeof(sa_akey));
		sa_akey.sadb_key_exttype = SADB_EXT_KEY_AUTH;
		sa_akey.sadb_key_len = (sizeof(sa_akey) +
		    ((alen + 7) / 8) * 8) / 8;
		sa_akey.sadb_key_bits = 8 * alen;

		memset(&sa_ekey, 0, sizeof(sa_ekey));
		sa_ekey.sadb_key_exttype = SADB_EXT_KEY_ENCRYPT;
		sa_ekey.sadb_key_len = (sizeof(sa_ekey) +
		    ((elen + 7) / 8) * 8) / 8;
		sa_ekey.sadb_key_bits = 8 * elen;

		break;
	}

	iov_cnt = 0;

	/* msghdr */
	iov[iov_cnt].iov_base = &smsg;
	iov[iov_cnt].iov_len = sizeof(smsg);
	iov_cnt++;

	switch (mtype) {
	case SADB_ADD:
	case SADB_UPDATE:
	case SADB_DELETE:
		/* SA hdr */
		iov[iov_cnt].iov_base = &sa;
		iov[iov_cnt].iov_len = sizeof(sa);
		smsg.sadb_msg_len += sa.sadb_sa_len;
		iov_cnt++;
		break;
	case SADB_GETSPI:
		/* SPI range */
		iov[iov_cnt].iov_base = &sa_spirange;
		iov[iov_cnt].iov_len = sizeof(sa_spirange);
		smsg.sadb_msg_len += sa_spirange.sadb_spirange_len;
		iov_cnt++;
		break;
	}

	/* dest addr */
	iov[iov_cnt].iov_base = &sa_dst;
	iov[iov_cnt].iov_len = sizeof(sa_dst);
	iov_cnt++;
	iov[iov_cnt].iov_base = &su_dst;
	iov[iov_cnt].iov_len = ROUNDUP(sockaddr_len(&su_dst.sa));
	smsg.sadb_msg_len += sa_dst.sadb_address_len;
	iov_cnt++;

	/* src addr */
	iov[iov_cnt].iov_base = &sa_src;
	iov[iov_cnt].iov_len = sizeof(sa_src);
	iov_cnt++;
	iov[iov_cnt].iov_base = &su_src;
	iov[iov_cnt].iov_len = ROUNDUP(sockaddr_len(&su_src.sa));
	smsg.sadb_msg_len += sa_src.sadb_address_len;
	iov_cnt++;

	switch (mtype) {
	case SADB_ADD:
	case SADB_UPDATE:
		if (alen) {
			/* auth key */
			iov[iov_cnt].iov_base = &sa_akey;
			iov[iov_cnt].iov_len = sizeof(sa_akey);
			iov_cnt++;
			iov[iov_cnt].iov_base = akey;
			iov[iov_cnt].iov_len = ((alen + 7) / 8) * 8;
			smsg.sadb_msg_len += sa_akey.sadb_key_len;
			iov_cnt++;
		}
		if (elen) {
			/* encryption key */
			iov[iov_cnt].iov_base = &sa_ekey;
			iov[iov_cnt].iov_len = sizeof(sa_ekey);
			iov_cnt++;
			iov[iov_cnt].iov_base = ekey;
			iov[iov_cnt].iov_len = ((elen + 7) / 8) * 8;
			smsg.sadb_msg_len += sa_ekey.sadb_key_len;
			iov_cnt++;
		}
		break;
	}

	len = smsg.sadb_msg_len * 8;
	do {
		n = writev(sd, iov, iov_cnt);
	} while (n == -1 && (errno == EAGAIN || errno == EINTR));

	if (n == -1) {
		log_warn("writev (%d/%d)", iov_cnt, len);
		return (-1);
	}

	return (0);
}
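
PF_KEY v2 expresses every length field in 64-bit (8-byte) units, which explains the repeated "/ 8" and the ((len + 7) / 8) * 8 padding of key material. A worked example, assuming a 20-byte HMAC-SHA1 key and the 8-byte struct sadb_key from RFC 2367:

/* alen = 20:
 *   key bytes on the wire = ((20 + 7) / 8) * 8 = 24   (padded)
 *   sa_akey.sadb_key_len  = (sizeof(sa_akey) + 24) / 8
 *                         = (8 + 24) / 8 = 4          (8-byte units)
 *   sa_akey.sadb_key_bits = 8 * 20 = 160              (unpadded bits)
 */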
Code example #16
File: object.c Project: bradfordw/prophet
size_t OCI_ObjectGetStructSize
(
    OCI_TypeInfo *typinf
)
{
    size_t size1 = 0;
    size_t size2 = 0;

    int type1 = 0;
    int type2 = 0;

    ub2 i;

    boolean align = FALSE;

    size_t size = 0;

    if (typinf->struct_size != 0)
    {
        size = typinf->struct_size;
    }
    else
    {
        for (i = 0; i < typinf->nb_cols; i++)
        {
            align = FALSE;

            if (i > 0)
            {
                size1 = size2;
                type1 = type2;

                typinf->offsets[i] = (int) size;
            }
            else
            {
                OCI_ObjectGetAttrInfo(typinf, i, &size1, &type1);

                typinf->offsets[i] = 0;
            }

            OCI_ObjectGetAttrInfo(typinf, i+1, &size2, &type2);

            switch (OCI_OFFSET_PAIR(type1, type2))
            {
                case OCI_OFFSET_PAIR(OCI_OFT_NUMBER, OCI_OFT_POINTER):
                case OCI_OFFSET_PAIR(OCI_OFT_DATE,   OCI_OFT_POINTER):
                case OCI_OFFSET_PAIR(OCI_OFT_OBJECT, OCI_OFT_POINTER):
             //   case OCI_OFFSET_PAIR(OCI_OFT_NUMBER, OCI_OFT_OBJECT):
             //   case OCI_OFFSET_PAIR(OCI_OFT_DATE,   OCI_OFT_OBJECT):
             //   case OCI_OFFSET_PAIR(OCI_OFT_OBJECT, OCI_OFT_OBJECT):
                {
                    align = TRUE;
                    break;
                }
            }

            size += size1;

            if (align)
            {
                size = ROUNDUP(size, OCI_DEF_ALIGN);
            }
        }

        typinf->struct_size = size + size2;
    }

    return size;
}
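
The pairwise switch decides whether padding must be inserted between member i and member i+1 when a pointer-sized field follows. A hypothetical layout (sizes invented for illustration; OCI_DEF_ALIGN assumed to be the pointer alignment, e.g. 8):

/* members: an OCINumber (22 bytes, OCI_OFT_NUMBER) followed by a
 * pointer-based attribute (OCI_OFT_POINTER):
 *   offsets[0] = 0, size after member 0 = 22
 *   NUMBER -> POINTER pair => align = TRUE
 *   size = ROUNDUP(22, 8) = 24
 *   offsets[1] = 24, struct_size = 24 + sizeof(void *)
 */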
Code example #17
File: osystem_gfx.cpp Project: 0xf1sh/scummvm
void OSystem_Wii::initSize(uint width, uint height,
							const Graphics::PixelFormat *format) {
	bool update = false;
	gfx_tex_format_t tex_format;

#ifdef USE_RGB_COLOR
	Graphics::PixelFormat newFormat;

	if (format)
		newFormat = *format;
	else
		newFormat = Graphics::PixelFormat::createFormatCLUT8();

	if (newFormat.bytesPerPixel > 2)
		newFormat = Graphics::PixelFormat::createFormatCLUT8();

	if (_pfGame != newFormat) {
		_pfGame = newFormat;
		update = true;
	}
#endif

	uint newWidth, newHeight;
	if (_pfGame.bytesPerPixel > 1) {
		newWidth = ROUNDUP(width, 4);
		newHeight = ROUNDUP(height, 4);
	} else {
		newWidth = ROUNDUP(width, 8);
		newHeight = ROUNDUP(height, 4);
	}

	if (_gameWidth != newWidth || _gameHeight != newHeight) {
		assert((newWidth <= 640) && (newHeight <= 480));

		if (width != newWidth || height != newHeight)
			printf("extending texture for compability: %ux%u -> %ux%u\n",
					width, height, newWidth, newHeight);

		_gameWidth = newWidth;
		_gameHeight = newHeight;
		update = true;
	}

	if (_gameRunning) {
		switchVideoMode(_configGraphicsMode);

		if (_arCorrection && (_gameWidth == 320) && (_gameHeight == 200))
			gfx_set_ar(320.0 / 240.0);
		else
			gfx_set_ar(f32(_gameWidth) / f32(_gameHeight));
	}

	if (update) {
		free(_gamePixels);

		tex_format = GFX_TF_PALETTE_RGB565;

#ifdef USE_RGB_COLOR
		if (_pfGame.bytesPerPixel > 1) {
			tex_format = GFX_TF_RGB565;
			_pfGameTexture = _pfRGB565;
		}

		printf("initSize %u*%u*%u (%u%u%u -> %u%u%u match: %d)\n",
				_gameWidth, _gameHeight, _pfGame.bytesPerPixel * 8,
				8 - _pfGame.rLoss, 8 - _pfGame.gLoss, 8 - _pfGame.bLoss,
				8 - _pfGameTexture.rLoss, 8 - _pfGameTexture.gLoss,
				8 - _pfGameTexture.bLoss, _pfGame == _pfGameTexture);

		_gamePixels = (u8 *) memalign(32, _gameWidth * _gameHeight *
										_pfGame.bytesPerPixel);
		memset(_gamePixels, 0, _gameWidth * _gameHeight *
				_pfGame.bytesPerPixel);
#else
		printf("initSize %u*%u\n", _gameWidth, _gameHeight);

		_gamePixels = (u8 *) memalign(32, _gameWidth * _gameHeight);
		memset(_gamePixels, 0, _gameWidth * _gameHeight);
#endif

		if (!gfx_tex_init(&_texGame, tex_format, TLUT_GAME,
					_gameWidth, _gameHeight)) {
			printf("could not init the game texture\n");
			::abort();
		}

		gfx_tex_set_bilinear_filter(&_texGame, _bilinearFilter);
		gfx_coords(&_coordsGame, &_texGame, GFX_COORD_FULLSCREEN);

		updateScreenResolution();
	}
}
Code example #18
File: mmu.c Project: qioixiy/harvey
void
mmuinit(void)
{
	Proc *up = externup();
	uint8_t *p;
	Page *page;
	uint64_t o, pa, r, sz;

	archmmu();
	DBG("mach%d: %#p pml4 %#p npgsz %d\n", machp()->machno, m, machp()->pml4, m->npgsz);

	if(machp()->machno != 0){
		/* NIX: KLUDGE: Has to go when each mach is using
		 * its own page table
		 */
		p = UINT2PTR(m->stack);
		p += MACHSTKSZ;

		memmove(p, UINT2PTR(mach0pml4.va), PTSZ);
		machp()->pml4 = &machp()->pml4kludge;
		machp()->pml4->va = PTR2UINT(p);
		machp()->pml4->pa = PADDR(p);
		machp()->pml4->daddr = mach0pml4.daddr;	/* # of user mappings in pml4 */

		r = rdmsr(Efer);
		r |= Nxe;
		wrmsr(Efer, r);
		cr3put(machp()->pml4->pa);
		DBG("m %#p pml4 %#p\n", m, machp()->pml4);
		return;
	}

	page = &mach0pml4;
	page->pa = cr3get();
	page->va = PTR2UINT(KADDR(page->pa));

	machp()->pml4 = page;

	r = rdmsr(Efer);
	r |= Nxe;
	wrmsr(Efer, r);

	/*
	 * Set up the various kernel memory allocator limits:
	 * pmstart/pmend bound the unused physical memory;
	 * vmstart/vmend bound the total possible virtual memory
	 * used by the kernel;
	 * vmunused is the highest virtual address currently mapped
	 * and used by the kernel;
	 * vmunmapped is the highest virtual address currently
	 * mapped by the kernel.
	 * Vmunused can be bumped up to vmunmapped before more
	 * physical memory needs to be allocated and mapped.
	 *
	 * This is set up here so meminit can map appropriately.
	 */
	o = sys->pmstart;
	sz = ROUNDUP(o, 4*MiB) - o;
	pa = asmalloc(0, sz, 1, 0);
	if(pa != o)
		panic("mmuinit: pa %#llux memstart %#llux\n", pa, o);
	sys->pmstart += sz;

	sys->vmstart = KSEG0;
	sys->vmunused = sys->vmstart + ROUNDUP(o, 4*KiB);
	sys->vmunmapped = sys->vmstart + o + sz;
	sys->vmend = sys->vmstart + TMFM;

	print("mmuinit: vmstart %#p vmunused %#p vmunmapped %#p vmend %#p\n",
		sys->vmstart, sys->vmunused, sys->vmunmapped, sys->vmend);

	/*
	 * Set up the map for PD entry access by inserting
	 * the relevant PDP entry into the PD. It's equivalent
	 * to PADDR(sys->pd)|PteRW|PteP.
	 *
	 */
	sys->pd[PDX(PDMAP)] = sys->pdp[PDPX(PDMAP)] & ~(PteD|PteA);
	print("sys->pd %#p %#p\n", sys->pd[PDX(PDMAP)], sys->pdp[PDPX(PDMAP)]);
	assert((pdeget(PDMAP) & ~(PteD|PteA)) == (PADDR(sys->pd)|PteRW|PteP));


	dumpmmuwalk(KZERO);

	mmuphysaddr(PTR2UINT(end));
}
Code example #19
bool
TracingMetaData::_InitPreviousTracingData()
{
	// TODO: ATM re-attaching the previous tracing buffer doesn't work very
	// well. The entries should be checked more thoroughly for validity -- e.g.
	// the pointers to the entries' vtable pointers could be invalid, which can
	// make the "traced" command quite unusable. The validity of the entries
	// could be checked in a safe environment (i.e. with a fault handler) with
	// typeid() and call of a virtual function.
	return false;

	addr_t bufferStart
		= (addr_t)fTraceOutputBuffer + kTraceOutputBufferSize;
	addr_t bufferEnd = bufferStart + MAX_TRACE_SIZE;

	if (bufferStart > bufferEnd || (addr_t)fBuffer != bufferStart
		|| (addr_t)fFirstEntry % sizeof(trace_entry) != 0
		|| (addr_t)fFirstEntry < bufferStart
		|| (addr_t)fFirstEntry + sizeof(trace_entry) >= bufferEnd
		|| (addr_t)fAfterLastEntry % sizeof(trace_entry) != 0
		|| (addr_t)fAfterLastEntry < bufferStart
		|| (addr_t)fAfterLastEntry > bufferEnd
		|| fPhysicalAddress == 0) {
		dprintf("Failed to init tracing meta data: Sanity checks "
			"failed.\n");
		return false;
	}

	// re-map the previous tracing buffer
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address = fTraceOutputBuffer;
	virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fPhysicalAddress;
	physicalRestrictions.high_address = fPhysicalAddress
		+ ROUNDUP(kTraceOutputBufferSize + MAX_TRACE_SIZE, B_PAGE_SIZE);
	area_id area = create_area_etc(B_SYSTEM_TEAM, "tracing log",
		kTraceOutputBufferSize + MAX_TRACE_SIZE, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_CLEAR, 0,
		&virtualRestrictions, &physicalRestrictions, NULL);
	if (area < 0) {
		dprintf("Failed to init tracing meta data: Mapping tracing log "
			"buffer failed: %s\n", strerror(area));
		return false;
	}

	dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE "\n",
		fTraceOutputBuffer, kTraceOutputBufferSize + MAX_TRACE_SIZE);

	// verify/repair the tracing entry list
	uint32 errorCount = 0;
	uint32 entryCount = 0;
	uint32 nonBufferEntryCount = 0;
	uint32 previousEntrySize = 0;
	trace_entry* entry = fFirstEntry;
	while (errorCount <= kMaxRecoveringErrorCount) {
		// check previous entry size
		if (entry->previous_size != previousEntrySize) {
			if (entry != fFirstEntry) {
				dprintf("ktrace recovering: entry %p: fixing previous_size "
					"size: %" B_PRIu32 " (should be %" B_PRIu32 ")\n", entry,
					entry->previous_size, previousEntrySize);
				errorCount++;
			}
			entry->previous_size = previousEntrySize;
		}

		if (entry == fAfterLastEntry)
			break;

		// check size field
		if ((entry->flags & WRAP_ENTRY) == 0 && entry->size == 0) {
			dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
				entry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry->size > uint32(fBuffer + kBufferSize - entry)) {
			dprintf("ktrace recovering: entry %p: size too big: %" B_PRIu32 "\n",
				entry, entry->size);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		if (entry < fAfterLastEntry && entry + entry->size > fAfterLastEntry) {
			dprintf("ktrace recovering: entry %p: entry crosses "
				"fAfterLastEntry (%p)\n", entry, fAfterLastEntry);
			errorCount++;
			fAfterLastEntry = entry;
			break;
		}

		// check for wrap entry
		if ((entry->flags & WRAP_ENTRY) != 0) {
			if ((uint32)(fBuffer + kBufferSize - entry)
					> kMaxTracingEntryByteSize / sizeof(trace_entry)) {
				dprintf("ktrace recovering: entry %p: wrap entry at invalid "
					"buffer location\n", entry);
				errorCount++;
			}

			if (entry->size != 0) {
				dprintf("ktrace recovering: entry %p: invalid wrap entry "
					"size: %" B_PRIu32 "\n", entry, entry->size);
				errorCount++;
				entry->size = 0;
			}

			previousEntrySize = fBuffer + kBufferSize - entry;
			entry = fBuffer;
			continue;
		}

		if ((entry->flags & BUFFER_ENTRY) == 0) {
			entry->flags |= CHECK_ENTRY;
			nonBufferEntryCount++;
		}

		entryCount++;
		previousEntrySize = entry->size;

		entry += entry->size;
	}

	if (errorCount > kMaxRecoveringErrorCount) {
		dprintf("ktrace recovering: Too many errors.\n");
		fAfterLastEntry = entry;
		fAfterLastEntry->previous_size = previousEntrySize;
	}

	dprintf("ktrace recovering: Recovered %" B_PRIu32 " entries + %" B_PRIu32
		" buffer entries from previous session. Expected %" B_PRIu32
		" entries.\n", nonBufferEntryCount, entryCount - nonBufferEntryCount,
		fEntries);
	fEntries = nonBufferEntryCount;

	B_INITIALIZE_SPINLOCK(&fLock);

	// TODO: Actually check the entries! Do that when first accessing the
	// tracing buffer from the kernel debugger (when sTracingDataRecovered is
	// true).
	sTracingDataRecovered = true;
	return true;
}
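
The recovery walk above steps through variable-length records stored in a ring of trace_entry slots. A simplified sketch of the stepping rule, with a stand-in struct (Haiku packs these fields into bitfields, and WRAP_ENTRY's actual value comes from its headers):

#include <stdint.h>

#define WRAP_ENTRY 0x01                /* flag bit assumed for the sketch */

typedef struct trace_entry {           /* simplified stand-in */
	uint32_t size;                 /* length in trace_entry slots, not bytes */
	uint32_t previous_size;
	uint32_t flags;
} trace_entry;

static trace_entry *
next_entry(trace_entry *e, trace_entry *ring_start)
{
	if ((e->flags & WRAP_ENTRY) != 0)
		return ring_start;         /* wrap record: resume at the front */
	return e + e->size;            /* pointer arithmetic in entry units */
}

This is why the sanity checks insist that fFirstEntry and fAfterLastEntry are trace_entry-aligned: every step lands on a slot boundary.
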
Code example #20
File: ipaddress_sysctl.c Project: fenner/net-snmp
/**
 * Load IP address entries into the container via sysctl(NET_RT_IFLIST).
 *
 * @retval  0 no errors
 * @retval !0 errors
 */
int
netsnmp_arch_ipaddress_container_load(netsnmp_container *container,
                                      u_int load_flags)
{
    netsnmp_ipaddress_entry *entry = NULL;
    u_char *if_list = NULL, *cp;
    size_t if_list_size = 0;
    int sysctl_oid[] = { CTL_NET, PF_ROUTE, 0, 0, NET_RT_IFLIST, 0 };
    struct ifa_msghdr *ifa;
    struct sockaddr *a;
    int amask;
    int rc = 0;
    int idx_offset = 0;

    DEBUGMSGTL(("access:ipaddress:container:sysctl",
                "load (flags %u)\n", load_flags));

    if (NULL == container) {
        snmp_log(LOG_ERR, "no container specified/found for interface\n");
        return -1;
    }

    if (sysctl(sysctl_oid, sizeof(sysctl_oid)/sizeof(int), 0,
               &if_list_size, 0, 0) == -1) {
        snmp_log(LOG_ERR, "could not get interface info (size)\n");
        return -2;
    }

    if_list = (u_char*)malloc(if_list_size);
    if (if_list == NULL) {
        snmp_log(LOG_ERR, "could not allocate memory for interface info "
                 "(%u bytes)\n", (unsigned) if_list_size);
        return -3;
    } else {
        DEBUGMSGTL(("access:ipaddress:container:sysctl",
                    "allocated %u bytes for if_list\n",
                    (unsigned) if_list_size));
    }

    if (sysctl(sysctl_oid, sizeof(sysctl_oid)/sizeof(int), if_list,
               &if_list_size, 0, 0) == -1) {
        snmp_log(LOG_ERR, "could not get interface info\n");
        free(if_list);
        return -2;
    }

    /* pass 2: walk addresses */
    for (cp = if_list; cp < if_list + if_list_size; cp += ifa->ifam_msglen) {
        ifa = (struct ifa_msghdr *) cp;
        int rtax;

        if (ifa->ifam_type != RTM_NEWADDR)
            continue;

        DEBUGMSGTL(("access:ipaddress:container:sysctl",
                    "received 0x%x in RTM_NEWADDR for ifindex %u\n",
                    ifa->ifam_addrs, ifa->ifam_index));

        entry = netsnmp_access_ipaddress_entry_create();
        if (entry == NULL) {
            rc = -3;
            break;
        }

        a = (struct sockaddr *) (ifa + 1);
        entry->ia_status = IPADDRESSSTATUSTC_UNKNOWN;
        entry->ia_origin = IPADDRESSORIGINTC_OTHER;
	entry->ia_address_len = 0;
        for (amask = ifa->ifam_addrs, rtax = 0; amask != 0; amask >>= 1, rtax++) {
            if ((amask & 1) != 0) {
                entry->ns_ia_index = ++idx_offset;
                entry->if_index = ifa->ifam_index;
                DEBUGMSGTL(("access:ipaddress:container:sysctl",
                            "%d: a=%p, sa_len=%d, sa_family=0x%x\n",
                            (int)entry->if_index, a, a->sa_len, a->sa_family));

                if (a->sa_family == AF_INET || a->sa_family == 0) {
                    struct sockaddr_in *a4 = (struct sockaddr_in *)a;
		    char str[128];
		    DEBUGMSGTL(("access:ipaddress:container:sysctl",
		                "IPv4 addr %s\n", inet_ntop(AF_INET, &a4->sin_addr.s_addr, str, 128)));
                    if (rtax == RTAX_IFA) {
			entry->ia_address_len = 4;
                        memcpy(entry->ia_address, &a4->sin_addr.s_addr, entry->ia_address_len);
		    }
                    else if (rtax == RTAX_NETMASK)
                        entry->ia_prefix_len = netsnmp_ipaddress_ipv4_prefix_len(a4->sin_addr.s_addr);
                }
                else if (a->sa_family == AF_INET6) {
                    struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)a;
		    char str[128];
		    DEBUGMSGTL(("access:ipaddress:container:sysctl",
		                "IPv6 addr %s\n", inet_ntop(AF_INET6, &a6->sin6_addr.s6_addr, str, 128)));
                    if (rtax == RTAX_IFA) {
			entry->ia_address_len = 16;
                        memcpy(entry->ia_address, &a6->sin6_addr, entry->ia_address_len);
		    }
                    else if (rtax == RTAX_NETMASK) {
                        entry->ia_prefix_len = netsnmp_ipaddress_ipv6_prefix_len(a6->sin6_addr);
			DEBUGMSGTL(("access:ipaddress:container:sysctl",
			            "prefix_len=%d\n", entry->ia_prefix_len));
		    }
                }
                a = (struct sockaddr *) ( ((char *) a) + ROUNDUP(a->sa_len) );
            }
        }
	if (entry->ia_address_len == 0) {
	    DEBUGMSGTL(("access:ipaddress:container:sysctl",
	                "entry skipped\n"));
	    netsnmp_access_ipaddress_entry_free(entry);
	}
	else if (CONTAINER_INSERT(container, entry) < 0) {
            DEBUGMSGTL(("access:ipaddress:container","error with ipaddress_entry: insert into container failed.\n"));
            netsnmp_access_ipaddress_entry_free(entry);
            continue;
        }
    }

    if (if_list != NULL)
        free(if_list);

    return 0;
}
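
The two sysctl() calls above are the standard probe-then-fetch idiom: a NULL output buffer makes sysctl(3) report the required size, and a second call fills it. A minimal sketch for BSD-style systems (error handling trimmed; note the interface list can grow between the two calls, a race the excerpt above does not retry either):

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/route.h>
#include <stdlib.h>

static void *
fetch_iflist(size_t *lenp)
{
    int mib[] = { CTL_NET, PF_ROUTE, 0, 0, NET_RT_IFLIST, 0 };
    size_t len = 0;

    if (sysctl(mib, 6, NULL, &len, NULL, 0) == -1)
        return NULL;                    /* probe the required size */
    void *buf = malloc(len);
    if (buf != NULL && sysctl(mib, 6, buf, &len, NULL, 0) == -1) {
        free(buf);                      /* fetch failed (e.g. list grew) */
        return NULL;
    }
    *lenp = len;
    return buf;
}
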
Code example #21
/* THIS SAMPLE CODE IS PROVIDED AS IS AND IS SUBJECT TO ALTERATIONS. FUJITSU */
/* MICROELECTRONICS ACCEPTS NO RESPONSIBILITY OR LIABILITY FOR ANY ERRORS OR */
/* ELIGIBILITY FOR ANY PURPOSES.                                             */
/*                 (C) Fujitsu Microelectronics Europe GmbH                  */
/*---------------------------------------------------------------------------
  __STD_LIB_sbrk.C
  - Used by heap_3.c for memory allocation and deletion.
 ---------------------------------------------------------------------------*/

#include "FreeRTOSConfig.h"
#include <stdlib.h>

static long    brk_siz = 0;
typedef int    _heep_t;
#define ROUNDUP(s)   (((s) + sizeof(_heep_t) - 1) & ~(sizeof(_heep_t) - 1))
static _heep_t _heep[ROUNDUP(configTOTAL_HEAP_SIZE) / sizeof(_heep_t)];
#define _heep_size   ROUNDUP(configTOTAL_HEAP_SIZE)

extern char *sbrk(int size)
{
	if (brk_siz + size > _heep_size || brk_siz + size < 0)
		return (char *)-1;
	brk_siz += size;
	return (char *)_heep + brk_siz - size;
}

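A hypothetical caller, to show the contract of this sbrk(): the old break is returned on success, (char *)-1 on exhaustion, and a negative size gives memory back. Note that sbrk() itself does not round size; only the total heap size is rounded via ROUNDUP. (Illustration only; in the FreeRTOS demo the compiler's malloc() is the real caller.)

char *p = sbrk(128);            /* reserve 128 bytes; returns the old break */
if (p == (char *)-1) {
    /* heap exhausted: brk_siz + 128 > _heep_size */
}
sbrk(-128);                     /* a negative size releases the tail again */
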
Code example #22
void
ARMVMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue)
{
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("ARMVMTranslationMap32Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	VMAreaMappings queue;

	RecursiveLocker locker(fLock);

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
				index++, start += B_PAGE_SIZE) {
			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntry(&pt[index]);
			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0)
				continue;

			fMapCount--;

			if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
				// Note, that we only need to invalidate the address, if the
				// accessed flags was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				// get the page
				vm_page* page = vm_lookup_page(
					(oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
				ASSERT(page != NULL);

				DEBUG_PAGE_ACCESS_START(page);

				// transfer the accessed/dirty flags to the page
				if (/*(oldEntry & ARM_PTE_ACCESSED) != 0*/ true) // XXX IRA
					page->accessed = true;
				if (/*(oldEntry & ARM_PTE_DIRTY) != 0 */ true)
					page->modified = true;

				// remove the mapping object/decrement the wired_count of the
				// page
				if (area->wiring == B_NO_LOCK) {
					vm_page_mapping* mapping = NULL;
					vm_page_mappings::Iterator iterator
						= page->mappings.GetIterator();
					while ((mapping = iterator.Next()) != NULL) {
						if (mapping->area == area)
							break;
					}

					ASSERT(mapping != NULL);

					area->mappings.Remove(mapping);
					page->mappings.Remove(mapping);
					queue.Add(mapping);
				} else
					page->DecrementWiredCount();

				if (!page->IsMapped()) {
					atomic_add(&gMappedPagesCount, -1);

					if (updatePageQueue) {
						if (page->Cache()->temporary)
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
						else if (page->modified)
							vm_page_set_state(page, PAGE_STATE_MODIFIED);
						else
							vm_page_set_state(page, PAGE_STATE_CACHED);
					}
				}

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
	// really critical here, as in all cases this method is used, the unmapped
	// area range is unmapped for good (resized/cut) and the pages will likely
	// be freed.

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}
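
The skip logic above relies on each L1 (page directory) entry covering a fixed chunk of address space. A sketch of the constants implied by the 256-entry inner loop (ARM short-descriptor format: 256 small pages of 4 KiB per L2 table, i.e. 1 MiB per L1 entry; these are inferred from the loop, not copied from Haiku's headers):

#define B_PAGE_SIZE          4096
#define kPageTableAlignment  (256 * B_PAGE_SIZE)      /* 1 MiB */
#define VADDR_TO_PDENT(va)   ((va) >> 20)             /* L1 index */
#define VADDR_TO_PTENT(va)   (((va) >> 12) & 0xff)    /* L2 index, 0..255 */

So ROUNDUP(start + 1, kPageTableAlignment) jumps straight to the first address served by the next page table.
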
Code example #23
File: mmc_wrapper.c Project: KortanZ/linux
/*
 * Function: mmc_write
 * Arg     : Data address on card, data length, i/p buffer
 * Return  : 0 on Success, non zero on failure
 * Flow    : Write the data from in to the card
 */
uint32_t mmc_write(uint64_t data_addr, uint32_t data_len, void *in)
{
	uint32_t val = 0;
	int ret = 0;
	uint32_t block_size = 0;
	uint32_t write_size = SDHCI_ADMA_MAX_TRANS_SZ;
	uint8_t *sptr = (uint8_t *)in;
	void *dev;

	dev = target_mmc_device();

	block_size = mmc_get_device_blocksize();

	ASSERT(!(data_addr % block_size));

	if (data_len % block_size)
		data_len = ROUNDUP(data_len, block_size);

	/*
	 * Flush the cache before handing over the data to
	 * storage driver
	 */
	arch_clean_invalidate_cache_range((addr_t)in, data_len);

	if (target_mmc_device())
	{
		/* TODO: This function is aware of max data that can be
		 * transferred using sdhci adma mode, need to have a cleaner
		 * implementation to keep this function independent of sdhci
		 * limitations
		 */
		while (data_len > write_size) {
			val = mmc_sdhci_write((struct mmc_device *)dev, (void *)sptr, (data_addr / block_size), (write_size / block_size));
			if (val)
			{
				dprintf(CRITICAL, "Failed Writing block @ %x\n",(unsigned int)(data_addr / block_size));
				return val;
			}
			sptr += write_size;
			data_addr += write_size;
			data_len -= write_size;
		}

		if (data_len)
			val = mmc_sdhci_write((struct mmc_device *)dev, (void *)sptr, (data_addr / block_size), (data_len / block_size));

		if (val)
			dprintf(CRITICAL, "Failed Writing block @ %x\n",(unsigned int)(data_addr / block_size));
	}
	else
	{
		ret = ufs_write((struct ufs_dev *)dev, data_addr, (addr_t)in, (data_len / block_size));

		if (ret)
		{
			dprintf(CRITICAL, "Error: UFS write failed writing to block: %llu\n", data_addr);
			val = 1;
		}
	}

	return val;
}
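
The ROUNDUP(data_len, block_size) above pads a partial trailing block, because block devices transfer whole blocks only; the driver will then read ROUNDUP(len, bs) bytes from the caller's buffer. The arithmetic, isolated (a sketch, not part of the driver):

#include <stdint.h>

/* e.g. blocks_for(5000, 512) == 10 */
static uint32_t
blocks_for(uint32_t len, uint32_t bs)
{
	return (len + bs - 1) / bs;     /* ROUNDUP(len, bs) / bs */
}
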
Code example #24
status_t
ARMVMTranslationMap32Bit::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
		attributes);
#if 0 //IRA
	// compute protection flags
	uint32 newProtectionFlags = 0;
	if ((attributes & B_USER_PROTECTION) != 0) {
		newProtectionFlags = ARM_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newProtectionFlags |= ARM_PTE_WRITABLE;
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newProtectionFlags = ARM_PTE_WRITABLE;

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); index < 256 && start < end;
				index++, start += B_PAGE_SIZE) {
			page_table_entry entry = pt[index];
			if ((entry & ARM_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("protect_tmap: protect page 0x%lx\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			page_table_entry oldEntry;
			while (true) {
				oldEntry = ARMPagingMethod32Bit::TestAndSetPageTableEntry(
					&pt[index],
					(entry & ~(ARM_PTE_PROTECTION_MASK
							| ARM_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags
						| ARMPagingMethod32Bit::MemoryTypeToPageTableEntryFlags(
							memoryType),
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
				// Note, that we only need to invalidate the address, if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);
#endif
	return B_OK;
}
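
The disabled loop above updates protection bits with a compare-and-swap retry so that accessed/dirty bits set concurrently by the MMU are never lost. The same idiom in portable C11 atomics (a sketch; Haiku uses its own TestAndSetPageTableEntry primitive):

#include <stdatomic.h>
#include <stdint.h>

typedef uint32_t page_table_entry;

static void
update_pte(_Atomic page_table_entry *pte, page_table_entry clear_mask,
	page_table_entry set_bits)
{
	page_table_entry old = atomic_load(pte);
	page_table_entry want;
	do {
		/* on CAS failure, 'old' is reloaded with the current value */
		want = (old & ~clear_mask) | set_bits;
	} while (!atomic_compare_exchange_weak(pte, &old, want));
}
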
Code example #25
/*!	Must be called with this address space's write lock held */
status_t
VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
	uint32 addressSpec, VMUserArea* area, uint32 allocationFlags)
{
	VMUserArea* last = NULL;
	VMUserArea* next;
	bool foundSpot = false;

	TRACE(("VMUserAddressSpace::_InsertAreaSlot: address space %p, start "
		"0x%lx, size %ld, end 0x%lx, addressSpec %ld, area %p\n", this, start,
		size, end, addressSpec, area));

	// do some sanity checking
	if (start < fBase || size == 0 || end > fEndAddress
		|| start + (size - 1) > end)
		return B_BAD_ADDRESS;

	if (addressSpec == B_EXACT_ADDRESS && area->id != RESERVED_AREA_ID) {
		// search for a reserved area
		status_t status = _InsertAreaIntoReservedRegion(start, size, area,
			allocationFlags);
		if (status == B_OK || status == B_BAD_VALUE)
			return status;

		// There was no reserved area, and the slot doesn't seem to be used
		// already
		// TODO: this could be further optimized.
	}

	size_t alignment = B_PAGE_SIZE;
	if (addressSpec == B_ANY_KERNEL_BLOCK_ADDRESS) {
		// align the memory to the next power of two of the size
		while (alignment < size)
			alignment <<= 1;
	}

	start = ROUNDUP(start, alignment);

	// walk up to the spot where we should start searching
second_chance:
	VMUserAreaList::Iterator it = fAreas.GetIterator();
	while ((next = it.Next()) != NULL) {
		if (next->Base() > start + (size - 1)) {
			// we have a winner
			break;
		}

		last = next;
	}

	// find the right spot depending on the address specification - the area
	// will be inserted directly after "last" ("next" is not referenced anymore)

	switch (addressSpec) {
		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		{
			// find a hole big enough for a new area
			if (last == NULL) {
				// see if we can build it at the beginning of the virtual map
				addr_t alignedBase = ROUNDUP(fBase, alignment);
				if (is_valid_spot(fBase, alignedBase, size,
						next == NULL ? end : next->Base())) {
					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			// keep walking
			while (next != NULL) {
				addr_t alignedBase = ROUNDUP(last->Base() + last->Size(),
					alignment);
				if (is_valid_spot(last->Base() + (last->Size() - 1),
						alignedBase, size, next->Base())) {
					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			if (foundSpot)
				break;

			addr_t alignedBase = ROUNDUP(last->Base() + last->Size(),
				alignment);
			if (is_valid_spot(last->Base() + (last->Size() - 1), alignedBase,
					size, end)) {
				// got a spot
				foundSpot = true;
				area->SetBase(alignedBase);
				break;
			} else if (area->id != RESERVED_AREA_ID) {
				// We didn't find a free spot - if there are any reserved areas,
				// we can now test those for free space
				// TODO: it would make sense to start with the biggest of them
				it.Rewind();
				next = it.Next();
				for (last = NULL; next != NULL; next = it.Next()) {
					if (next->id != RESERVED_AREA_ID) {
						last = next;
						continue;
					}

					// TODO: take free space after the reserved area into
					// account!
					addr_t alignedBase = ROUNDUP(next->Base(), alignment);
					if (next->Base() == alignedBase && next->Size() == size) {
						// The reserved area is entirely covered, and thus,
						// removed
						fAreas.Remove(next);

						foundSpot = true;
						area->SetBase(alignedBase);
						next->~VMUserArea();
						free_etc(next, allocationFlags);
						break;
					}

					if ((next->protection & RESERVED_AVOID_BASE) == 0
						&&  alignedBase == next->Base()
						&& next->Size() >= size) {
						// The new area will be placed at the beginning of the
						// reserved area and the reserved area will be offset
						// and resized
						foundSpot = true;
						next->SetBase(next->Base() + size);
						next->SetSize(next->Size() - size);
						area->SetBase(alignedBase);
						break;
					}

					if (is_valid_spot(next->Base(), alignedBase, size,
							next->Base() + (next->Size() - 1))) {
						// The new area will be placed at the end of the
						// reserved area, and the reserved area will be resized
						// to make space
						alignedBase = ROUNDDOWN(
							next->Base() + next->Size() - size, alignment);

						foundSpot = true;
						next->SetSize(alignedBase - next->Base());
						area->SetBase(alignedBase);
						last = next;
						break;
					}

					last = next;
				}
			}
			break;
		}

		case B_BASE_ADDRESS:
		{
			// find a hole big enough for a new area beginning with "start"
			if (last == NULL) {
				// see if we can build it at the beginning of the specified
				// start
				if (next == NULL || next->Base() > start + (size - 1)) {
					foundSpot = true;
					area->SetBase(start);
					break;
				}

				last = next;
				next = it.Next();
			}

			// keep walking
			while (next != NULL) {
				if (next->Base() - (last->Base() + last->Size()) >= size) {
					// we found a spot (it'll be filled up below)
					break;
				}

				last = next;
				next = it.Next();
			}

			addr_t lastEnd = last->Base() + (last->Size() - 1);
			if (next != NULL || end - lastEnd >= size) {
				// got a spot
				foundSpot = true;
				if (lastEnd < start)
					area->SetBase(start);
				else
					area->SetBase(lastEnd + 1);
				break;
			}

			// we didn't find a free spot in the requested range, so we'll
			// try again without any restrictions
			start = fBase;
			addressSpec = B_ANY_ADDRESS;
			last = NULL;
			goto second_chance;
		}

		case B_EXACT_ADDRESS:
			// see if we can create it exactly here
			if ((last == NULL || last->Base() + (last->Size() - 1) < start)
				&& (next == NULL || next->Base() > start + (size - 1))) {
				foundSpot = true;
				area->SetBase(start);
				break;
			}
			break;
		default:
			return B_BAD_VALUE;
	}

	if (!foundSpot)
		return addressSpec == B_EXACT_ADDRESS ? B_BAD_VALUE : B_NO_MEMORY;

	area->SetSize(size);
	if (last)
		fAreas.Insert(fAreas.GetNext(last), area);
	else
		fAreas.Insert(fAreas.Head(), area);

	IncrementChangeCount();
	return B_OK;
}
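
For B_ANY_KERNEL_BLOCK_ADDRESS the code grows the alignment until it reaches the next power of two at or above the area size, then ROUNDUPs every candidate base to it. The computation, isolated (a sketch; B_PAGE_SIZE assumed 4 KiB):

#include <stddef.h>

#define B_PAGE_SIZE 4096

/* e.g. block_alignment(24 * 1024) == 32 * 1024 */
static size_t
block_alignment(size_t size)
{
	size_t alignment = B_PAGE_SIZE;
	while (alignment < size)
		alignment <<= 1;
	return alignment;
}
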
Code example #26
static isc_result_t
internal_current(isc_interfaceiter_t *iter) {
	struct ifa_msghdr *ifam, *ifam_end;

	REQUIRE(VALID_IFITER(iter));
	REQUIRE (iter->pos < (unsigned int) iter->bufused);

	ifam = (struct ifa_msghdr *) ((char *) iter->buf + iter->pos);
	ifam_end = (struct ifa_msghdr *) ((char *) iter->buf + iter->bufused);

	if (ifam->ifam_type == RTM_IFINFO) {
		struct if_msghdr *ifm = (struct if_msghdr *) ifam;
		struct sockaddr_dl *sdl = (struct sockaddr_dl *) (ifm + 1);
		unsigned int namelen;

		memset(&iter->current, 0, sizeof(iter->current));

		iter->current.ifindex = sdl->sdl_index;
		namelen = sdl->sdl_nlen;
		if (namelen > sizeof(iter->current.name) - 1)
			namelen = sizeof(iter->current.name) - 1;

		memset(iter->current.name, 0, sizeof(iter->current.name));
		memcpy(iter->current.name, sdl->sdl_data, namelen);

		iter->current.flags = 0;

		if ((ifam->ifam_flags & IFF_UP) != 0)
			iter->current.flags |= INTERFACE_F_UP;

		if ((ifam->ifam_flags & IFF_POINTOPOINT) != 0)
			iter->current.flags |= INTERFACE_F_POINTTOPOINT;

		if ((ifam->ifam_flags & IFF_LOOPBACK) != 0)
			iter->current.flags |= INTERFACE_F_LOOPBACK;

		if ((ifam->ifam_flags & IFF_BROADCAST) != 0)
			iter->current.flags |= INTERFACE_F_BROADCAST;

#ifdef IFF_MULTICAST
		if ((ifam->ifam_flags & IFF_MULTICAST) != 0)
			iter->current.flags |= INTERFACE_F_MULTICAST;
#endif

		/*
		 * This is not an interface address.
		 * Force another iteration.
		 */
		return (ISC_R_IGNORE);
	} else if (ifam->ifam_type == RTM_NEWADDR) {
		int i;
		int family;
		struct sockaddr *mask_sa = NULL;
		struct sockaddr *addr_sa = NULL;
		struct sockaddr *dst_sa = NULL;

		struct sockaddr *sa = (struct sockaddr *)(ifam + 1);
		family = sa->sa_family;

		for (i = 0; i < RTAX_MAX; i++)
		{
			if ((ifam->ifam_addrs & (1 << i)) == 0)
				continue;

			INSIST(sa < (struct sockaddr *) ifam_end);

			switch (i) {
			case RTAX_NETMASK: /* Netmask */
				mask_sa = sa;
				break;
			case RTAX_IFA: /* Interface address */
				addr_sa = sa;
				break;
			case RTAX_BRD: /* Broadcast or destination address */
				dst_sa = sa;
				break;
			}
#ifdef ISC_PLATFORM_HAVESALEN
			sa = (struct sockaddr *)((char*)(sa)
					 + ROUNDUP(sa->sa_len));
#else
#ifdef sgi
			/*
			 * Do as the contributed SGI code does.
			 */
			sa = (struct sockaddr *)((char*)(sa)
					 + ROUNDUP(_FAKE_SA_LEN_DST(sa)));
#else
			/* XXX untested. */
			sa = (struct sockaddr *)((char*)(sa)
					 + ROUNDUP(sizeof(struct sockaddr)));
#endif
#endif
		}

		if (addr_sa == NULL)
			return (ISC_R_IGNORE);

		family = addr_sa->sa_family;
		if (family != AF_INET && family != AF_INET6)
			return (ISC_R_IGNORE);

		iter->current.af = family;

		get_addr(family, &iter->current.address, addr_sa,
			 iter->current.name);

		if (mask_sa != NULL)
			get_addr(family, &iter->current.netmask, mask_sa,
				 iter->current.name);

		if (dst_sa != NULL &&
		    (iter->current.flags & INTERFACE_F_POINTTOPOINT) != 0)
			get_addr(family, &iter->current.dstaddress, dst_sa,
				 iter->current.name);

		if (dst_sa != NULL &&
		    (iter->current.flags & INTERFACE_F_BROADCAST) != 0)
			get_addr(family, &iter->current.broadcast, dst_sa,
				 iter->current.name);

		return (ISC_R_SUCCESS);
	} else {
		printf(isc_msgcat_get(isc_msgcat, ISC_MSGSET_IFITERSYSCTL,
				      ISC_MSG_UNEXPECTEDTYPE,
				      "warning: unexpected interface list "
				      "message type\n"));
		return (ISC_R_IGNORE);
	}
}
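
Routing messages pack a variable set of sockaddrs back to back, each padded to a long boundary; that is what the ROUNDUP(sa->sa_len) stepping above does. The classic idiom as standalone macros (a sketch for sa_len platforms, cf. Stevens; named ROUNDUP_SA here to avoid clashing with the other ROUNDUPs on this page; a zero sa_len still advances by one long so the walk cannot stall):

#include <sys/socket.h>

#define ROUNDUP_SA(a) \
	((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
#define NEXT_SA(sa) \
	((struct sockaddr *)((char *)(sa) + ROUNDUP_SA((sa)->sa_len)))
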
Code example #27
File: pspgl_buffers.c Project: Bracket-/psp-ports
static GLboolean __pspgl_buffer_init(struct pspgl_buffer *buf,
				     GLsizeiptr size, GLenum usage)
{
	void *p = NULL;
	int try_hard = 0;

	size = ROUNDUP(size, CACHELINE_SIZE);

	buf->base = NULL;
	buf->size = size;

	switch(usage) {
	case GL_STATIC_COPY_ARB:	/* must be in edram */
	case GL_DYNAMIC_COPY_ARB:
	case GL_STREAM_COPY_ARB:
		/* We'll try hard to get vidmem, but it doesn't matter
		   if we fail because the buffer will be moved into
		   vidmem when needed. */
		try_hard = 1;
		/* FALLTHROUGH */

	case GL_STATIC_DRAW_ARB:	/* nice to have in edram */
	case GL_STATIC_READ_ARB:
	case GL_DYNAMIC_READ_ARB:
		if (__pspgl_vidmem_alloc(buf))
			p = buf->base;
		else if (try_hard) {
			/* Work hard to get the memory... */

			/* If there just isn't enough space, evict some buffers */
			if (__pspgl_vidmem_avail() < size)
				evict_vidmem(size);

			/* Try again after some evictions */
			if (__pspgl_vidmem_alloc(buf))
				p = buf->base;
			else if (__pspgl_vidmem_compact()) {
				__pspgl_moved_textures();

				/* and one last time before giving up */
				if (__pspgl_vidmem_alloc(buf))
					p = buf->base;
			}
		}
		break;

	case GL_DYNAMIC_DRAW_ARB:	/* prefer in system memory */
	case GL_STREAM_READ_ARB:
	case GL_STREAM_DRAW_ARB:
		/* fallthrough to allocation */
		break;

	default:
		GLERROR(GL_INVALID_ENUM);
		return GL_FALSE;
	}

	if (p == NULL) {
		p = memalign(CACHELINE_SIZE, size);

		if (p == NULL)
			return GL_FALSE;
	}

  	/* put cache into appropriate unmapped state */
	sceKernelDcacheWritebackInvalidateRange(p, size);

	buf->refcount = 1;
	buf->mapped = 0;
	buf->flags = 0;
	buf->generation = 0;

	buf->pin_prevp = NULL;
	buf->pin_next = NULL;

	buf->list_prev = NULL;
	buf->list_next = NULL;

	buf->base = p;
	buf->size = size;

	buffer_insert_tail(buf);

	return GL_TRUE;
}
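
Sizes are rounded to whole cache lines so that sceKernelDcacheWritebackInvalidateRange() never operates on a line shared with a neighbouring allocation. The system-memory fallback, isolated (a sketch; 64 is assumed for CACHELINE_SIZE, the PSP port defines its own value):

#include <malloc.h>
#include <stddef.h>

#define CACHELINE_SIZE 64

static void *
alloc_cacheline_buffer(size_t size)
{
	size = (size + CACHELINE_SIZE - 1) & ~(size_t)(CACHELINE_SIZE - 1);
	return memalign(CACHELINE_SIZE, size);
}
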
Code example #28
File: virtio-gpu.c Project: 0xBADCA7/lk
status_t virtio_gpu_start(struct virtio_device *dev)
{
    status_t err;

    LTRACEF("dev %p\n", dev);

    struct virtio_gpu_dev *gdev = (struct virtio_gpu_dev *)dev->priv;

    /* get the display info and see if we find a valid pmode */
    err = get_display_info(gdev);
    if (err < 0) {
        LTRACEF("failed to get display info\n");
        return err;
    }

    if (gdev->pmode_id < 0) {
        LTRACEF("we failed to find a pmode, exiting\n");
        return ERR_NOT_FOUND;
    }

    /* allocate a resource */
    err = allocate_2d_resource(gdev, &gdev->display_resource_id, gdev->pmode.r.width, gdev->pmode.r.height);
    if (err < 0) {
        LTRACEF("failed to allocate 2d resource\n");
        return err;
    }

    /* attach a backing store to the resource */
    size_t len = gdev->pmode.r.width * gdev->pmode.r.height * 4;
    gdev->fb = pmm_alloc_kpages(ROUNDUP(len, PAGE_SIZE) / PAGE_SIZE, NULL);
    if (!gdev->fb) {
        TRACEF("failed to allocate framebuffer, wanted 0x%zx bytes\n", len);
        return ERR_NO_MEMORY;
    }

    printf("virtio-gpu: framebuffer at %p, 0x%zx bytes\n", gdev->fb, len);

    err = attach_backing(gdev, gdev->display_resource_id, gdev->fb, len);
    if (err < 0) {
        LTRACEF("failed to attach backing store\n");
        return err;
    }

    /* attach this resource as a scanout */
    err = set_scanout(gdev, gdev->pmode_id, gdev->display_resource_id, gdev->pmode.r.width, gdev->pmode.r.height);
    if (err < 0) {
        LTRACEF("failed to set scanout\n");
        return err;
    }

    /* create the flush thread */
    thread_t *t;
    t = thread_create("virtio gpu flusher", &virtio_gpu_flush_thread, (void *)gdev, HIGH_PRIORITY, DEFAULT_STACK_SIZE);
    thread_detach_and_resume(t);

    /* kick it once */
    event_signal(&gdev->flush_event, true);

    LTRACE_EXIT;

    return NO_ERROR;
}
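
The framebuffer math above: the excerpt assumes 4 bytes per pixel, and pmm_alloc_kpages() wants a whole page count. Isolated (a sketch; PAGE_SIZE assumed 4 KiB as in lk):

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096

/* e.g. fb_pages(1024, 768) == 768 exactly (3 MiB) */
static size_t
fb_pages(uint32_t width, uint32_t height)
{
	size_t len = (size_t)width * height * 4;    /* 4 bytes per pixel */
	return (len + PAGE_SIZE - 1) / PAGE_SIZE;   /* ROUNDUP(len, PAGE_SIZE) / PAGE_SIZE */
}
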
Code example #29
void*
malloc(size_t n)
{
    int i, cont;
    int nwrap;
    uint32_t *ref;
    void *v;

    if (mptr == 0)
        mptr = mbegin;

    n = ROUNDUP(n, 4);

    if (n >= MAXMALLOC)
        return 0;

    if ((uintptr_t) mptr % PGSIZE){
        /*
         * we're in the middle of a partially
         * allocated page - can we add this chunk?
         * the +4 below is for the ref count.
         */
        ref = (uint32_t*) (ROUNDUP(mptr, PGSIZE) - 4);
        if ((uintptr_t) mptr / PGSIZE == (uintptr_t) (mptr + n - 1 + 4) / PGSIZE) {
            (*ref)++;
            v = mptr;
            mptr += n;
            return v;
        }
        /*
         * stop working on this page and move on.
         */
        free(mptr);	/* drop reference to this page */
        mptr = ROUNDDOWN(mptr + PGSIZE, PGSIZE);
    }

    /*
     * now we need to find some address space for this chunk.
     * if it's less than a page we leave it open for allocation.
     * runs of more than a page can't have ref counts so we
     * flag the PTE entries instead.
     */
    nwrap = 0;
    while (1) {
        if (isfree(mptr, n + 4))
            break;
        mptr += PGSIZE;
        if (mptr == mend) {
            mptr = mbegin;
            if (++nwrap == 2)
                return 0;	/* out of address space */
        }
    }

    /*
     * allocate at mptr - the +4 makes sure we allocate a ref count.
     */
    for (i = 0; i < n + 4; i += PGSIZE){
        cont = (i + PGSIZE < n + 4) ? PTE_CONTINUED : 0;
        if (sys_page_alloc(0, mptr + i, PTE_P|PTE_U|PTE_W|cont) < 0){
            for (; i >= 0; i -= PGSIZE)
                sys_page_unmap(0, mptr + i);
            return 0;	/* out of physical memory */
        }
    }

    ref = (uint32_t*) (mptr + i - 4);
    *ref = 2;	/* reference for mptr, reference for returned block */
    v = mptr;
    mptr += n;
    return v;
}
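
The trick in this allocator: small chunks share a page, and the page's last 4 bytes hold a reference count, which is why every in-page allocation reserves n + 4 bytes. A sketch of the helper a matching free() would use (hypothetical name; PGSIZE assumed 4 KiB):

#include <stdint.h>

#define PGSIZE 4096

static uint32_t *
page_refcount(void *v)
{
	/* end of the page containing v, even when v is page-aligned */
	uintptr_t page_end = ((uintptr_t)v | (PGSIZE - 1)) + 1;
	return (uint32_t *)(page_end - 4);
}
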
Code example #30
void
VMCI_ReadDatagramsFromPort(VMCIIoHandle ioHandle,  // IN
                           VMCIIoPort dgInPort,    // IN
                           uint8 *dgInBuffer,      // IN
                           size_t dgInBufferSize)  // IN
{
   VMCIDatagram *dg;
   size_t currentDgInBufferSize = PAGE_SIZE;
   size_t remainingBytes;

   ASSERT(dgInBufferSize >= PAGE_SIZE);

   VMCI_ReadPortBytes(ioHandle, dgInPort, dgInBuffer, currentDgInBufferSize);
   dg = (VMCIDatagram *)dgInBuffer;
   remainingBytes = currentDgInBufferSize;

   while (dg->dst.resource != VMCI_INVALID_ID || remainingBytes > PAGE_SIZE) {
      unsigned dgInSize;

      /*
       * When the input buffer spans multiple pages, a datagram can
       * start on any page boundary in the buffer.
       */

      if (dg->dst.resource == VMCI_INVALID_ID) {
         ASSERT(remainingBytes > PAGE_SIZE);
         dg = (VMCIDatagram *)ROUNDUP((uintptr_t)dg + 1, PAGE_SIZE);
         ASSERT((uint8 *)dg < dgInBuffer + currentDgInBufferSize);
         remainingBytes = (size_t)(dgInBuffer + currentDgInBufferSize - (uint8 *)dg);
         continue;
      }

      dgInSize = VMCI_DG_SIZE_ALIGNED(dg);

      if (dgInSize <= dgInBufferSize) {
         int result;

         /*
          * If the remaining bytes in the datagram buffer doesn't
          * contain the complete datagram, we first make sure we have
          * enough room for it and then we read the reminder of the
          * datagram and possibly any following datagrams.
          */

         if (dgInSize > remainingBytes) {

            if (remainingBytes != currentDgInBufferSize) {

               /*
                * Move the partial datagram to the front, then read the
                * remainder of the datagram and possibly any following
                * datagrams into the bytes after it.
                */

               memmove(dgInBuffer, dgInBuffer + currentDgInBufferSize - remainingBytes,
                       remainingBytes);

               dg = (VMCIDatagram *)dgInBuffer;
            }

            if (currentDgInBufferSize != dgInBufferSize) {
               currentDgInBufferSize = dgInBufferSize;
            }

            VMCI_ReadPortBytes(ioHandle, dgInPort, dgInBuffer + remainingBytes,
                               currentDgInBufferSize - remainingBytes);
         }

         /* We special case event datagrams from the hypervisor. */
         if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
             dg->dst.resource == VMCI_EVENT_HANDLER) {
            result = VMCIEvent_Dispatch(dg);
         } else {
            result = VMCIDatagram_InvokeGuestHandler(dg);
         }
         if (result < VMCI_SUCCESS) {
            VMCI_DEBUG_LOG(4, (LGPFX"Datagram with resource (ID=0x%x) failed "
                               "(err=%d).\n", dg->dst.resource, result));
         }

         /* On to the next datagram. */
         dg = (VMCIDatagram *)((uint8 *)dg + dgInSize);
      } else {
         size_t bytesToSkip;

         /*
          * Datagram doesn't fit in datagram buffer of maximal size. We drop it.
          */

         VMCI_DEBUG_LOG(4, (LGPFX"Failed to receive datagram (size=%u bytes).\n",
                            dgInSize));

         bytesToSkip = dgInSize - remainingBytes;
         if (currentDgInBufferSize != dgInBufferSize) {
            currentDgInBufferSize = dgInBufferSize;
         }
         for (;;) {
            VMCI_ReadPortBytes(ioHandle, dgInPort, dgInBuffer, currentDgInBufferSize);
            if (bytesToSkip <= currentDgInBufferSize) {
               break;
            }
            bytesToSkip -= currentDgInBufferSize;
         }
         dg = (VMCIDatagram *)(dgInBuffer + bytesToSkip);
      }

      remainingBytes = (size_t) (dgInBuffer + currentDgInBufferSize - (uint8 *)dg);

      if (remainingBytes < VMCI_DG_HEADERSIZE) {
         /* Get the next batch of datagrams. */

         VMCI_ReadPortBytes(ioHandle, dgInPort, dgInBuffer, currentDgInBufferSize);
         dg = (VMCIDatagram *)dgInBuffer;
         remainingBytes = currentDgInBufferSize;
      }
   }
}
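
A closing note on the stepping above: VMCI_DG_SIZE_ALIGNED rounds a datagram's on-the-wire size up so the next datagram starts on an aligned boundary. The shape of the computation (a sketch; 8 is assumed for the alignment, the real constant comes from the VMCI headers):

#include <stddef.h>

#define VMCI_DG_ALIGN 8

/* e.g. dg_size_aligned(29) == 32 */
static size_t
dg_size_aligned(size_t sz)
{
	return (sz + VMCI_DG_ALIGN - 1) & ~(size_t)(VMCI_DG_ALIGN - 1);
}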