Example #1
void
memlist_copy(memblock *dest, memblock *source) {
    /* clear the destination */
    memlist_deleteall(dest);
    /* now copy, ignoring order */
    while( source->next ) {
        source = source->next;
        memlist_add( dest, source->where, source->size );
    }
}
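A minimal usage sketch for memlist_copy above. The memblock layout, the sentinel-head convention, and the memlist_add/memlist_deleteall helpers below are assumptions reconstructed from the call sites in the example, not the original project's definitions:

#include <stdlib.h>

/* assumed layout: singly linked list whose head is a sentinel node */
typedef struct memblock memblock;
struct memblock {
    void     *where;   /* start of the tracked range */
    size_t    size;    /* length of the tracked range */
    memblock *next;
};

void memlist_copy(memblock *dest, memblock *source);  /* from the example above */

/* assumed helper: prepend a new block after the sentinel head */
void
memlist_add(memblock *head, void *where, size_t size)
{
    memblock *b = malloc(sizeof(*b));
    if (!b)
        abort();
    b->where = where;
    b->size = size;
    b->next = head->next;
    head->next = b;
}

/* assumed helper: free every block after the sentinel head */
void
memlist_deleteall(memblock *head)
{
    while (head->next) {
        memblock *dead = head->next;
        head->next = dead->next;
        free(dead);
    }
}

int
main(void)
{
    memblock src = { 0 }, dst = { 0 };  /* sentinel heads */
    char a[16], b[32];

    memlist_add(&src, a, sizeof(a));
    memlist_add(&src, b, sizeof(b));
    memlist_copy(&dst, &src);   /* dst now tracks the same two ranges */

    memlist_deleteall(&src);
    memlist_deleteall(&dst);
    return 0;
}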
Example #2
/* Resolve dependencies for a given package
 * @param curl curl handle
 * @param hashdb hash database
 * @param curpkg current package we are resolving
 * @param dep_list pointer to list to store resulting dependencies
 * @param resolve_lvl level of dep resolution. RESOLVE_THOROUGH forces
 *        downloading of AUR PKGBUILDs
 *
 * returns -1 on error, 0 on success
 */
static int crawl_resolve(CURL *curl, struct pw_hashdb *hashdb, struct pkgpair *curpkg,
                         alpm_list_t **dep_list, int resolve_lvl)
{
    alpm_list_t *i, *depmod_list, *deps = NULL;
    struct pkgpair *pkgpair;
    struct pkgpair tmppkg;
    void *pkg_provides;
    void *memlist_ptr;
    const char *cache_result;
    const char *depname, *final_pkgname;
    char cwd[PATH_MAX];
    char buf[PATH_MAX];

    /* Normalize package before doing anything else */
    final_pkgname = normalize_package(curl, hashdb, curpkg->pkgname, resolve_lvl);
    if (!final_pkgname) {
        return -1;
    }

    enum pkgfrom_t *from = hashmap_search(hashdb->pkg_from, (void *) final_pkgname);
    if (!from) {
        die("Failed to find out where package \"%s\" is from!\n", final_pkgname);
    }

    switch (*from) {
    case PKG_FROM_LOCAL:
        tmppkg.pkgname = final_pkgname;
        pkgpair = hash_search(hashdb->local, &tmppkg);
        goto get_deps;
    case PKG_FROM_SYNC:
        tmppkg.pkgname = final_pkgname;
        pkgpair = hash_search(hashdb->sync, &tmppkg);
        goto get_deps;
    default:
        goto aur_deps;
    }

aur_uptodate:
    tmppkg.pkgname = final_pkgname;
    tmppkg.pkg = NULL;
    pkgpair = hash_search(hashdb->aur, &tmppkg);

get_deps:
    if (!pkgpair) {
        /* Shouldn't happen */
        die("Unable to find package \"%s\" in local/sync db!", final_pkgname);
    }

    depmod_list = alpm_pkg_get_depends(pkgpair->pkg);
    for (i = depmod_list; i; i = i->next) {
        char *s = alpm_dep_compute_string(i->data);
        strncpy(buf, s, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';
        free(s);
        chompversion(buf);
        depname = normalize_package(curl, hashdb, buf, resolve_lvl);
        /* normalize_package may fail if the AUR download fails */
        if (!depname) {
            alpm_list_free(deps);
            return -1;
        }
        deps = alpm_list_add(deps, (void *) depname);
    }

    if (dep_list) {
        *dep_list = deps;
    } else {
        alpm_list_free(deps);
    }

    return 0;

aur_deps:
    tmppkg.pkgname = final_pkgname;
    tmppkg.pkg = NULL;

    /* For installed AUR packages which are up to date */
    if (resolve_lvl != RESOLVE_THOROUGH) {
        if (hash_search(hashdb->aur, &tmppkg) &&
                !hash_search(hashdb->aur_outdated, (void *) final_pkgname)) {
            /* NOTE: jumps backward, to the aur_uptodate label above! */
            goto aur_uptodate;
        }
    }

    /* RESOLVE_THOROUGH / out-of-date AUR package.
     * Download the PKGBUILD and extract its deps */
    if (!getcwd(cwd, PATH_MAX)) {
        return error(PW_ERR_GETCWD);
    }

    if (chdir(final_pkgname)) {
        return error(PW_ERR_CHDIR);
    }

    deps = grab_dependencies("PKGBUILD");
    if (chdir(cwd)) {
        alpm_list_free(deps);
        return error(PW_ERR_RESTORECWD);
    }

    if (dep_list) {
        const char *normdep;
        alpm_list_t *new_deps = NULL;

        /* Transfer string ownership to the memlist and normalize each package */
        for (i = deps; i; i = i->next) {
            memlist_ptr = memlist_add(hashdb->strpool, &i->data);
            normdep = normalize_package(curl, hashdb, memlist_ptr, resolve_lvl);
            new_deps = alpm_list_add(new_deps, (void *) normdep);
        }

        *dep_list = new_deps;
    }

    alpm_list_free(deps);
    return 0;
}
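The dependency loop above copies each dependency string into a fixed buffer and strips its version constraint with chompversion() before normalizing. A self-contained sketch of that truncate-and-strip pattern; the chompversion() stand-in here (cutting at the first '<', '>', or '=') is an assumption about its behavior, not the original implementation:

#include <stdio.h>
#include <string.h>

/* hypothetical stand-in for chompversion(): truncate a dependency
 * string such as "glibc>=2.35" at the first version operator */
static void
chompversion(char *dep)
{
    dep[strcspn(dep, "<>=")] = '\0';
}

int
main(void)
{
    char buf[256];
    const char *s = "glibc>=2.35";

    /* bounded copy with explicit NUL termination, as in crawl_resolve() */
    strncpy(buf, s, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';

    chompversion(buf);
    printf("%s\n", buf);    /* prints "glibc" */
    return 0;
}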
Example #3
/*
 * We want to add memory, but have no spare page_t structures.  Use some of
 * our new memory for the page_t structures.
 *
 * Somewhat similar to kphysm_add_memory_dynamic(), but simpler.
 */
static int
balloon_init_new_pages(mfn_t framelist[], pgcnt_t count)
{
	pgcnt_t	metapgs, totalpgs, num_pages;
	paddr_t	metasz;
	pfn_t	meta_start;
	page_t	*page_array;
	caddr_t	va;
	int	i, rv, locked;
	mem_structs_t *mem;
	struct memseg *segp;

	/* Calculate the number of pages we're going to add */
	totalpgs = bln_stats.bln_new_target - bln_stats.bln_current_pages;

	/*
	 * The following calculates the number of "meta" pages -- the pages
	 * that will be required to hold page_t structures for all new pages.
	 * Each data page consumes PAGESIZE bytes of new memory plus
	 * sizeof (page_t) bytes of metadata, so of totalpgs pages,
	 * (totalpgs * PAGESIZE) / (PAGESIZE + sizeof (page_t)) can be data
	 * pages; the remainder must be meta pages.
	 */
	metapgs = totalpgs - (((uint64_t)(totalpgs) << PAGESHIFT) /
	    (PAGESIZE + sizeof (page_t)));

	/*
	 * Given the number of page_t structures we need, is there also
	 * room in our meta pages for a memseg and memlist struct?
	 * If not, we'll need one more meta page.
	 */
	if ((metapgs << PAGESHIFT) < (totalpgs * sizeof (page_t) +
	    MEM_STRUCT_SIZE))
		metapgs++;

	/*
	 * metapgs is calculated from totalpgs, which may be much larger than
	 * count.  If we don't have enough pages, all of the pages in this
	 * batch will be made meta pages, and a future trip through
	 * balloon_inc_reservation() will add the rest of the meta pages.
	 */
	if (metapgs > count)
		metapgs = count;

	/*
	 * Figure out the number of page_t structures that can fit in metapgs
	 *
	 * This will cause us to initialize more page_t structures than we
	 * need - these may be used in future memory increases.
	 */
	metasz = pfn_to_pa(metapgs);
	num_pages = (metasz - MEM_STRUCT_SIZE) / sizeof (page_t);

	DTRACE_PROBE3(balloon__alloc__stats, pgcnt_t, totalpgs, pgcnt_t,
	    num_pages, pgcnt_t, metapgs);

	/*
	 * We only increment mfn_count by count, not num_pages, to keep the
	 * space of all valid pfns contiguous.  This means we create page_t
	 * structures with invalid pagenums -- we deal with this situation
	 * in balloon_page_sub.
	 */
	mfn_count += count;

	/*
	 * Get a VA for the pages that will hold page_t and other structures.
	 * The memseg and memlist structures will go at the beginning, with
	 * the page_t structures following.
	 */
	va = (caddr_t)vmem_alloc(heap_arena, metasz, VM_SLEEP);
	/* LINTED: improper alignment */
	mem = (mem_structs_t *)va;
	page_array = mem->pages;

	meta_start = bln_stats.bln_max_pages;

	/*
	 * Set the mfn to pfn mapping for the meta pages.
	 */
	locked = balloon_lock_contig_pfnlist(metapgs);
	for (i = 0; i < metapgs; i++) {
		reassign_pfn(bln_stats.bln_max_pages + i, framelist[i]);
	}
	if (locked)
		unlock_contig_pfnlist();

	/*
	 * For our meta pages, map them in and zero the page.
	 * This will be the first time touching the new pages.
	 */
	hat_devload(kas.a_hat, va, metasz, bln_stats.bln_max_pages,
	    PROT_READ | PROT_WRITE,
	    HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
	bzero(va, metasz);

	/*
	 * Initialize the page array for the new pages.
	 */
	for (i = 0; i < metapgs; i++) {
		page_array[i].p_pagenum = bln_stats.bln_max_pages++;
		page_array[i].p_offset = (u_offset_t)-1;
		page_iolock_init(&page_array[i]);
		rv = page_lock(&page_array[i], SE_EXCL, NULL, P_NO_RECLAIM);
		ASSERT(rv == 1);
	}

	/*
	 * For the rest of the pages, initialize the page_t struct and
	 * add them to the free list
	 */
	for (i = metapgs; i < num_pages; i++) {
		page_array[i].p_pagenum = bln_stats.bln_max_pages++;
		page_array[i].p_offset = (u_offset_t)-1;
		page_iolock_init(&page_array[i]);
		rv = page_lock(&page_array[i], SE_EXCL, NULL, P_NO_RECLAIM);
		ASSERT(rv == 1);
		balloon_page_add(&page_array[i]);
	}

	/*
	 * Remember where I said that we don't call this function?  The missing
	 * code right here is why.  We need to set up kpm mappings for any new
	 * pages coming in.  However, if someone starts up a domain with small
	 * memory, then greatly increases it, we could get in some horrible
	 * deadlock situations as we steal page tables for kpm use, and
	 * userland applications take them right back before we can use them
	 * to set up our new memory.  Once a way around that is found, and a
	 * few other changes are made, we'll be able to enable this code.
	 */

	/*
	 * Update kernel structures, part 1: memsegs list
	 */
	mem->memseg.pages_base = meta_start;
	mem->memseg.pages_end = bln_stats.bln_max_pages - 1;
	mem->memseg.pages = &page_array[0];
	mem->memseg.epages = &page_array[num_pages - 1];
	mem->memseg.next = NULL;
	memsegs_lock(1);
	for (segp = memsegs; segp->next != NULL; segp = segp->next)
		;
	segp->next = &mem->memseg;
	memsegs_unlock(1);

	/*
	 * Update kernel structures, part 2: mem_node array
	 */
	mem_node_add_slice(meta_start, bln_stats.bln_max_pages);

	/*
	 * Update kernel structures, part 3: phys_install array
	 * (*sigh* how many of these things do we need?)
	 */
	memlist_write_lock();
	memlist_add(pfn_to_pa(meta_start), num_pages, &mem->memlist,
	    &phys_install);
	memlist_write_unlock();

	build_pfn_hash();

	return (metapgs);
}
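To make the meta-page formula concrete, a standalone sketch evaluating it with illustrative values; the 4 KB page size and 120-byte page_t below are assumptions chosen for the example, not the kernel's actual sizes:

#include <stdio.h>
#include <stdint.h>

#define PAGESHIFT    12                   /* assumed 4 KB pages */
#define PAGESIZE     (1ULL << PAGESHIFT)
#define PAGE_T_SIZE  120ULL               /* assumed sizeof (page_t) */

int
main(void)
{
    uint64_t totalpgs = 10000;

    /* same formula as in balloon_init_new_pages() above */
    uint64_t metapgs = totalpgs -
        ((totalpgs << PAGESHIFT) / (PAGESIZE + PAGE_T_SIZE));

    /* with these assumptions: 10000 * 4096 / (4096 + 120) = 9715 data
     * pages, leaving 285 meta pages */
    printf("totalpgs=%llu -> metapgs=%llu\n",
        (unsigned long long)totalpgs, (unsigned long long)metapgs);
    return 0;
}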
Example #4
int
memlist_merge(TOKEN_VALUE* dest, TOKEN_VALUE* source, TOKEN_SIZE size) {
    /*
    test whether the value beginning at dest, and continuing for size,
    intersects with any other color value; if so, compare the two.
    If they match, update the memlist, combining the values if
    necessary.  If the comparison fails, return false.

    note: on returning FALSE, the list may have been changed beyond
    repair, since we assume that a false result ends the comparison

    NOTE: for now we actually return two different truth values from
    this function: 1 if a comparison was necessary, and 2 if there
    were no overlaps

    the size argument is a number of TOKEN_VALUEs, not bytes
    */

    memblock *current = &head;
    memblock *working;
    memblock *last = &head;
    BOOLEAN found = FALSE;
    BOOLEAN bDuplicate = FALSE;

    /*
        look for an overlapping data range
        taking into account that the size of the data may be more
        than a single byte
    */
    TOKEN_VALUE *enddest;
    TOKEN_VALUE *endworking;
    char *cmpbegin;
    char *cmpend;
    char *cmpsource;
    int result;

    if( size <= 0 ){
        logerror("memlist_merge, invalid data");
        return MERGE_ERROR;
    }

    while( current->next ) {
        /* get the byte boundaries of the two ranges, leaving the last byte
           for the moment.  NOTE that these pointers are TOKEN_VALUEs, not chars */
        working = current->next;
        enddest = dest + size;
        endworking = working->where + working->size;
        /* two cases where the byte boundaries (of the destination) are non-overlapping */
        /* note that since the enddest and endworking are actually one byte past the
            end of the block, we must check for the equality case */
        if( endworking <= dest
            || enddest <= working->where ) {
            current = current->next;
        }
        /*
        in any case of overlap, the overlapped value
        is represented by the max(begin) to the min(end)
        */
        else {
            found = TRUE;
            /* there IS an overlap */
            /* compare from the max begin to the min end */
            cmpbegin = (char*)ncmax( dest, working->where );
            cmpend = (char*)ncmin( endworking, enddest );
            cmpsource = (char*)source + (cmpbegin - (char*)dest);

            /* if the bytes are not aligned, then return false, because
                there is no way to reconcile them on both big- and
                little-endian machines */
            if( (cmpend - cmpbegin) % sizeof(TOKEN_VALUE) )
                return MERGE_ERROR;

            /* compare the destination with the source */
            result = memcmp( cmpbegin, cmpsource, cmpend-cmpbegin );
            if(result)
                return MERGE_DIFF;

            /*
            now we need to remove the current item, combine the two
            results, and move on to the next item

            for now we will not combine the items, but will instead
            continue to compare the item with the remaining overlapping
            items until all of the list has been checked.
            To avoid having duplicate items in the list, we set a flag
            when one item is exactly the same as another item, and then
            skip adding it to the list
            */
            {
            memblock *temp;
            /* make our current item span the union of the two items:
            begin = ncmin( begin, current->where );
            size = (ncmax( endnew, endcurrent ) - begin + 1) / sizeof(TOKEN_VALUE); */
            if( dest == working->where
                && endworking == enddest ) {
                bDuplicate = TRUE;
                break;
            }
            /* move on to the next item */
            temp = current->next;
            /* do not delete the working item, because we are using it:
            memblock_delete( &head, working ); */
            current = temp;
            }
        }

    }

    /* since we have reached the end, we must add the current data point */
    if( !bDuplicate )
        memlist_add( &head, dest, size );

    /* special return code for when no comparisons were necessary;
        used when testing the function against the netman output */
    if( !found )
        return MERGE_NOMATCH;

    return MERGE_SAME;
}
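The heart of the overlap test above is the half-open interval intersection from max(begin) to min(end). A self-contained sketch of just that computation; ncmax/ncmin are assumed to be ordinary pointer max/min helpers and are re-created here:

#include <stdio.h>

/* assumed equivalents of the ncmax/ncmin helpers used above */
static const char *ncmax(const char *a, const char *b) { return a > b ? a : b; }
static const char *ncmin(const char *a, const char *b) { return a < b ? a : b; }

int
main(void)
{
    char data[16];
    const char *dest  = data,     *enddest    = data + 8;   /* [0, 8)  */
    const char *where = data + 4, *endworking = data + 12;  /* [4, 12) */

    /* ranges are half-open, so they are disjoint exactly when one ends
       at or before the other begins -- the equality case checked above */
    if (endworking <= dest || enddest <= where) {
        puts("no overlap");
    } else {
        const char *cmpbegin = ncmax(dest, where);
        const char *cmpend   = ncmin(endworking, enddest);
        printf("overlap of %td bytes\n", cmpend - cmpbegin);   /* prints 4 */
    }
    return 0;
}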