Example #1
static bool ralloc_add_overlap_c_api_test(void) {
    BEGIN_TEST;

    // Make a pool and attach it to an allocator.
    ralloc_allocator_t* alloc = NULL;
    {
        ralloc_pool_t* pool;
        ASSERT_EQ(ZX_OK, ralloc_create_pool(REGION_POOL_MAX_SIZE, &pool), "");
        ASSERT_NONNULL(pool, "");

        // Create an allocator and add our region pool to it.
        ASSERT_EQ(ZX_OK, ralloc_create_allocator(&alloc), "");
        ASSERT_NONNULL(alloc, "");
        ASSERT_EQ(ZX_OK, ralloc_set_region_pool(alloc, pool), "");

        // Release our pool reference.  The allocator should be holding onto its own
        // reference at this point.
        ralloc_release_pool(pool);
    }

    // Add each of the regions specified by the test and check the expected results.
    for (size_t i = 0; i < countof(ADD_OVERLAP_TESTS); ++i) {
        const alloc_add_overlap_test_t* TEST = ADD_OVERLAP_TESTS + i;

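        // TEST->ovl is passed through as the allow-overlap flag for this add.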
        zx_status_t res = ralloc_add_region(alloc, &TEST->reg, TEST->ovl);

        EXPECT_EQ(TEST->res, res, "");
        EXPECT_EQ(TEST->cnt, ralloc_get_available_region_count(alloc), "");
    }

    // Destroy our allocator.
    ralloc_destroy_allocator(alloc);

    END_TEST;
}
Example #2
static bool ZbiTestTruncated(void) {
    BEGIN_TEST;
    uint8_t* test_zbi = get_test_zbi();

    auto cleanup = fbl::MakeAutoCall([test_zbi]() {
        free(test_zbi);
    });

    ASSERT_NONNULL(test_zbi, "failed to alloc test image");

    zbi::Zbi image(test_zbi);

    zbi_header_t* bootdata_header = reinterpret_cast<zbi_header_t*>(test_zbi);
    bootdata_header->length -= 8; // Truncate the image.

    zbi_header_t* trace = nullptr;
    ASSERT_NE(image.Check(&trace), ZBI_RESULT_OK,
              "Truncated image reported as okay");

    // zbi.Check should only give us diagnostics about the error if there was
    // an error in the first place.
    ASSERT_NONNULL(trace, "Bad image with no trace diagnostics?");

    int count = 0;
    zbi_result_t result = image.ForEach(check_contents, &count);

    ASSERT_NE(result, ZBI_RESULT_OK,
              "Truncated image not reported as truncated");

    ASSERT_EQ(count, 3, "bad bootdata item count");

    END_TEST;
}
Example #3
static bool ralloc_specific_c_api_test(void) {
    BEGIN_TEST;

    // Make a pool and attach it to an allocator.  Then add the test regions to it.
    ralloc_allocator_t* alloc = NULL;
    {
        ralloc_pool_t* pool;
        ASSERT_EQ(ZX_OK, ralloc_create_pool(REGION_POOL_MAX_SIZE, &pool), "");
        ASSERT_NONNULL(pool, "");

        // Create an allocator and add our region pool to it.
        ASSERT_EQ(ZX_OK, ralloc_create_allocator(&alloc), "");
        ASSERT_NONNULL(alloc, "");
        ASSERT_EQ(ZX_OK, ralloc_set_region_pool(alloc, pool), "");

        // Release our pool reference.  The allocator should be holding onto its own
        // reference at this point.
        ralloc_release_pool(pool);
    }

    for (size_t i = 0; i < countof(ALLOC_SPECIFIC_REGIONS); ++i)
        EXPECT_EQ(ZX_OK, ralloc_add_region(alloc, &ALLOC_SPECIFIC_REGIONS[i], false), "");

    // Run the alloc specific tests.  Hold onto the regions it allocates so they
    // can be cleaned up properly when the test finishes.
    const ralloc_region_t* regions[countof(ALLOC_SPECIFIC_TESTS)];
    memset(regions, 0, sizeof(regions));

    for (size_t i = 0; i < countof(ALLOC_SPECIFIC_TESTS); ++i) {
        const alloc_specific_alloc_test_t* TEST = ALLOC_SPECIFIC_TESTS + i;
        zx_status_t res = ralloc_get_specific_region_ex(alloc, &TEST->req, regions + i);

        // Make sure we get the test result we were expecting.
        EXPECT_EQ(TEST->res, res, "");

        // If the allocation claimed to succeed, we should have gotten back a
        // non-null region which exactly matches our requested region.
        if (res == ZX_OK) {
            ASSERT_NONNULL(regions[i], "");
            EXPECT_EQ(TEST->req.base, regions[i]->base, "");
            EXPECT_EQ(TEST->req.size, regions[i]->size, "");
        } else {
            EXPECT_NULL(regions[i], "");
        }
    }

    // Put the regions we have allocated back in the allocator.
    for (size_t i = 0; i < countof(regions); ++i)
        if (regions[i])
            ralloc_put_region(regions[i]);

    // Destroy our allocator.
    ralloc_destroy_allocator(alloc);

    END_TEST;
}
Example #4
static void free_swayc(swayc_t *cont) {
	if (!ASSERT_NONNULL(cont)) {
		return;
	}
	// TODO: does not properly handle containers with children,
	// but functions that call this usually check for that
	if (cont->children) {
		if (cont->children->length) {
			int i;
			for (i = 0; i < cont->children->length; ++i) {
				free_swayc(cont->children->items[i]);
			}
		}
		list_free(cont->children);
	}
	if (cont->floating) {
		if (cont->floating->length) {
			int i;
			for (i = 0; i < cont->floating->length; ++i) {
				free_swayc(cont->floating->items[i]);
			}
		}
		list_free(cont->floating);
	}
	if (cont->parent) {
		remove_child(cont);
	}
	if (cont->name) {
		free(cont->name);
	}
	free(cont);
}
Example #5
static bool ZbiTestBadContainer(void) {
    BEGIN_TEST;

    uint8_t* test_zbi = get_test_zbi();

    auto cleanup = fbl::MakeAutoCall([test_zbi]() {
        free(test_zbi);
    });

    ASSERT_NONNULL(test_zbi, "failed to alloc test image");

    zbi_header_t* bootdata_header = reinterpret_cast<zbi_header_t*>(test_zbi);
    // Set the container type to something arbitrary that is not a valid container type.
    bootdata_header->type = ZBI_TYPE_STORAGE_BOOTFS;

    zbi::Zbi image(test_zbi);

    zbi_header_t* problem_header = nullptr;
    ASSERT_NE(image.Check(&problem_header), ZBI_RESULT_OK,
              "bad container fault not detected");

    // Make sure that the diagnostic information tells us that the container is
    // bad.
    ASSERT_EQ(problem_header, bootdata_header);

    END_TEST;
}
Example #6
static bool ZbiTestBasic(void) {
    BEGIN_TEST;
    uint8_t* test_zbi = get_test_zbi();

    auto cleanup = fbl::MakeAutoCall([test_zbi]() {
        free(test_zbi);
    });

    ASSERT_NONNULL(test_zbi, "failed to alloc test image");

    zbi::Zbi image(test_zbi);

    zbi_header_t* trace = nullptr;
    ASSERT_EQ(image.Check(&trace), ZBI_RESULT_OK, "malformed image");

    // zbi.Check should only give us diagnostics about the error if there was
    // an error in the first place.
    ASSERT_NULL(trace, "bad header set but image reported okay?");

    int count = 0;
    zbi_result_t result = image.ForEach(check_contents, &count);

    ASSERT_EQ(result, ZBI_RESULT_OK, "content check failed");

    ASSERT_EQ(count, 3, "bad bootdata item count");

    END_TEST;
}
Example #7
swayc_t *new_view(swayc_t *sibling, wlc_handle handle) {
	if (!ASSERT_NONNULL(sibling)) {
		return NULL;
	}
	const char *title = wlc_view_get_title(handle);
	swayc_t *view = new_swayc(C_VIEW);
	sway_log(L_DEBUG, "Adding new view %lu:%s to container %p %d",
		handle, title, sibling, sibling ? sibling->type : 0);
	// Setup values
	view->handle = handle;
	view->name = title ? strdup(title) : NULL;
	view->visible = true;
	view->is_focused = true;
	// Setup geometry
	const struct wlc_geometry* geometry = wlc_view_get_geometry(handle);
	view->width = 0;
	view->height = 0;
	view->desired_width = geometry->size.w;
	view->desired_height = geometry->size.h;

	view->gaps = config->gaps_inner;

	view->is_floating = false;

	if (sibling->type == C_WORKSPACE) {
		// Case of focused workspace, just create as child of it
		add_child(sibling, view);
	} else {
		// Regular case, create as sibling of current container
		add_sibling(sibling, view);
	}
	return view;
}
Example #8
bool TestFuzzer::Init() {
    BEGIN_HELPER;
    Reset();

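    // open_memstream gives us FILE* streams backed by in-memory buffers that
    // grow as they are written, so the test can inspect the captured output later.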
    out_ = open_memstream(&outbuf_, &outbuflen_);
    ASSERT_NONNULL(out_);

    err_ = open_memstream(&errbuf_, &errbuflen_);
    ASSERT_NONNULL(err_);

    // Configure base object
    set_root(fixture_.path());
    set_out(out_);
    set_err(err_);

    END_HELPER;
}
Example #9
// Make sure we never overflow the ZBI's buffer by appending.
static bool ZbiTestAppendFull(void) {
    BEGIN_TEST;

    // Enough space for a small payload
    const size_t kMaxAppendPayloadSize = ZBI_ALIGN(5);
    const size_t kExtraBytes = sizeof(zbi_header_t) + kMaxAppendPayloadSize;
    const size_t kZbiSize = sizeof(test_zbi_t) + kExtraBytes;
    const size_t kExtraSentinelLength = 64;

    uint8_t* test_zbi = get_test_zbi_extra(kExtraBytes + kExtraSentinelLength);

    ASSERT_NONNULL(test_zbi, "failed to alloc test image");

    auto cleanup = fbl::MakeAutoCall([test_zbi] {
        free(test_zbi);
    });

    // Fill the space after the buffer with sentinel bytes and make sure those
    // bytes are never touched by the append operation.
    const uint8_t kSentinelByte = 0xa5; // 0b1010 0101
    memset(test_zbi + kZbiSize, kSentinelByte, kExtraSentinelLength);

    zbi::Zbi image(test_zbi, kZbiSize);

    const uint8_t kDataByte = 0xc3;
    uint8_t dataBuffer[kMaxAppendPayloadSize + 1];
    memset(dataBuffer, kDataByte, sizeof(dataBuffer));

    // Try to append a buffer that's one byte too big and make sure we reject
    // it.
    zbi_result_t res = image.AppendSection(
        kMaxAppendPayloadSize + 1, // One more than the max length!
        ZBI_TYPE_STORAGE_RAMDISK,
        0,
        0,
        reinterpret_cast<const void*>(dataBuffer));

    ASSERT_NE(res, ZBI_RESULT_OK, "zbi appended a section that was too big");

    // Now try again with a section that is exactly the right size. Make sure
    // we don't stomp on the sentinel.
    res = image.AppendSection(
        kMaxAppendPayloadSize,
        ZBI_TYPE_STORAGE_RAMDISK,
        0,
        0,
        reinterpret_cast<const void*>(dataBuffer));

    ASSERT_EQ(res, ZBI_RESULT_OK, "zbi_append rejected a section that should "
                                  "have fit.");

    for (size_t i = 0; i < kExtraSentinelLength; i++) {
        ASSERT_EQ(test_zbi[kZbiSize + i], kSentinelByte,
                  "corrupt sentinel bytes, append section overflowed.");
    }

    END_TEST;
}
Example #10
bool TestRenameExclusive(void) {
    BEGIN_TEST;
    for (size_t i = 0; i < kIterCount; i++) {

        // Test case of renaming from a single source.
        ASSERT_EQ(mkdir("::rename_start", 0666), 0);
        ASSERT_TRUE((thread_action_test<10, 1>([](void* arg) {
            if (rename("::rename_start", "::rename_end") == 0) {
                return kSuccess;
            } else if (errno == ENOENT) {
                return kFailure;
            }
            return kUnexpectedFailure;
        })));
        ASSERT_EQ(rmdir("::rename_end"), 0);

        // Test case of renaming from multiple sources at once,
        // to a single destination
        std::atomic<uint32_t> ctr{0};
        ASSERT_TRUE((thread_action_test<10, 1>([](void* arg) {
            auto ctr = reinterpret_cast<std::atomic<uint32_t>*>(arg);
            char start[128];
            snprintf(start, sizeof(start) - 1, "::rename_start_%u", ctr->fetch_add(1));
            if (mkdir(start, 0666)) {
                return kUnexpectedFailure;
            }

            // Give the target a child, so it cannot be overwritten as a target
            char child[256];
            snprintf(child, sizeof(child) - 1, "%s/child", start);
            if (mkdir(child, 0666)) {
                return kUnexpectedFailure;
            }

            if (rename(start, "::rename_end") == 0) {
                return kSuccess;
            } else if (errno == ENOTEMPTY || errno == EEXIST) {
                return rmdir(child) == 0 && rmdir(start) == 0 ? kFailure :
                        kUnexpectedFailure;
            }
            return kUnexpectedFailure;
        }, &ctr)));

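        // Clean up: remove whatever the winning rename left under ::rename_end,
        // then remove the directory itself before the next iteration.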
        DIR* dir = opendir("::rename_end");
        ASSERT_NONNULL(dir);
        struct dirent* de;
        while ((de = readdir(dir)) != nullptr) {
            unlinkat(dirfd(dir), de->d_name, AT_REMOVEDIR);
        }
        ASSERT_EQ(closedir(dir), 0);
        ASSERT_EQ(rmdir("::rename_end"), 0);
    }
    END_TEST;
}
Example #11
bool detach_self_test(void) {
    BEGIN_TEST;

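    // Spawn many threads, each owning its own heap-allocated thrd_t; detach_thrd
    // is presumably responsible for detaching the thread and freeing the allocation.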
    for (size_t i = 0; i < 1000; i++) {
        thrd_t* thrd = calloc(1, sizeof(thrd_t));
        ASSERT_NONNULL(thrd, "");
        ASSERT_EQ(thrd_create(thrd, detach_thrd, thrd), 0, "");
    }

    END_TEST;
}
Example #12
swayc_t *destroy_output(swayc_t *output) {
	if (!ASSERT_NONNULL(output)) {
		return NULL;
	}
	if (output->children->length == 0) {
		// TODO move workspaces to other outputs
	}
	sway_log(L_DEBUG, "OUTPUT: Destroying output '%lu'", output->handle);
	free_swayc(output);
	return &root_container;
}
Example #13
void reset_gaps(swayc_t *view, void *data) {
	if (!ASSERT_NONNULL(view)) {
		return;
	}
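	// Outputs take the configured outer gap; views take the inner gap.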
	if (view->type == C_OUTPUT) {
		view->gaps = config->gaps_outer;
	}
	if (view->type == C_VIEW) {
		view->gaps = config->gaps_inner;
	}
}
Example #14
swayc_t *swayc_parent_by_type(swayc_t *container, enum swayc_types type) {
	if (!ASSERT_NONNULL(container)) {
		return NULL;
	}
	if (!sway_assert(type < C_TYPES && type >= C_ROOT, "%s: invalid type", __func__)) {
		return NULL;
	}
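	// Walk up the tree until an ancestor of the requested type is found,
	// or NULL if no such ancestor exists.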
	do {
		container = container->parent;
	} while (container && container->type != type);
	return container;
}
Example #15
swayc_t *destroy_container(swayc_t *container) {
	if (!ASSERT_NONNULL(container)) {
		return NULL;
	}
	while (container->children->length == 0 && container->type == C_CONTAINER) {
		sway_log(L_DEBUG, "Container: Destroying container '%p'", container);
		swayc_t *parent = container->parent;
		free_swayc(container);
		container = parent;
	}
	return container;
}
Example #16
swayc_t *swayc_parent_by_layout(swayc_t *container, enum swayc_layouts layout) {
	if (!ASSERT_NONNULL(container)) {
		return NULL;
	}
	if (!sway_assert(layout < L_LAYOUTS && layout >= L_NONE, "%s: invalid layout", __func__)) {
		return NULL;
	}
	do {
		container = container->parent;
	} while (container && container->layout != layout);
	return container;
}
Example #17
static bool ralloc_subtract_c_api_test(void) {
    BEGIN_TEST;

    // Make a pool and attach it to an allocator.
    ralloc_allocator_t* alloc = NULL;
    {
        ralloc_pool_t* pool;
        ASSERT_EQ(ZX_OK, ralloc_create_pool(REGION_POOL_MAX_SIZE, &pool), "");
        ASSERT_NONNULL(pool, "");

        // Create an allocator and add our region pool to it.
        ASSERT_EQ(ZX_OK, ralloc_create_allocator(&alloc), "");
        ASSERT_NONNULL(alloc, "");
        ASSERT_EQ(ZX_OK, ralloc_set_region_pool(alloc, pool), "");

        // Release our pool reference.  The allocator should be holding onto its own
        // reference at this point.
        ralloc_release_pool(pool);
    }

    // Run the test sequence, adding and subtracting regions and verifying the results.
    for (size_t i = 0; i < countof(SUBTRACT_TESTS); ++i) {
        const alloc_subtract_test_t* TEST = SUBTRACT_TESTS + i;

        zx_status_t res;
        if (TEST->add)
            res = ralloc_add_region(alloc, &TEST->reg, false);
        else
            res = ralloc_sub_region(alloc, &TEST->reg, TEST->incomplete);

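        // TEST->res is a boolean here: true means the operation is expected to succeed.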
        EXPECT_EQ(TEST->res ? ZX_OK : ZX_ERR_INVALID_ARGS, res, "");
        EXPECT_EQ(TEST->cnt, ralloc_get_available_region_count(alloc), "");
    }

    // Destroy our allocator.
    ralloc_destroy_allocator(alloc);

    END_TEST;
}
Example #18
swayc_t *destroy_view(swayc_t *view) {
	if (!ASSERT_NONNULL(view)) {
		return NULL;
	}
	sway_log(L_DEBUG, "Destroying view '%p'", view);
	swayc_t *parent = view->parent;
	free_swayc(view);

	// Destroy empty containers
	if (parent->type == C_CONTAINER) {
		return destroy_container(parent);
	}
	return parent;
}
Example #19
void set_view_visibility(swayc_t *view, void *data) {
	if (!ASSERT_NONNULL(view)) {
		return;
	}
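	// data points at the wlc visibility mask to apply; a mask value of 2 is
	// treated as "visible" here.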
	uint32_t *p = data;
	if (view->type == C_VIEW) {
		wlc_view_set_mask(view->handle, *p);
		if (*p == 2) {
			wlc_view_bring_to_front(view->handle);
		} else {
			wlc_view_send_to_back(view->handle);
		}
	}
	view->visible = (*p == 2);
}
Example #20
bool TestFuzzer::Eval(const char* cmdline) {
    BEGIN_HELPER;
    ASSERT_TRUE(Init());

    char* buf = strdup(cmdline);
    ASSERT_NONNULL(buf);
    auto cleanup = fbl::MakeAutoCall([&buf]() { free(buf); });
    char* ptr = buf;
    char* arg;
    while ((arg = strsep(&ptr, " "))) {
        if (arg && *arg) {
            args_.push_back(arg);
        }
    }

    END_HELPER;
}
Example #21
bool TestInodeReuse(void) {
    BEGIN_TEST;

    ASSERT_EQ(mkdir("::reuse", 0755), 0);
    DIR* d = opendir("::reuse");
    ASSERT_NONNULL(d);
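    // Repeatedly create and unlink the same names; this exercises inode reuse
    // in the filesystem under test.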
    for (size_t i = 0; i < 1000; i++) {
        ASSERT_EQ(mkdirat(dirfd(d), "foo", 0666), 0);
        if (reuse_subdirectory) {
            ASSERT_EQ(mkdirat(dirfd(d), "foo/bar", 0666), 0);
            ASSERT_EQ(unlinkat(dirfd(d), "foo/bar", 0), 0);
        }
        ASSERT_EQ(unlinkat(dirfd(d), "foo", 0), 0);
    }
    ASSERT_EQ(closedir(d), 0);
    ASSERT_EQ(rmdir("::reuse"), 0);
    END_TEST;
}
Example #22
swayc_t *new_container(swayc_t *child, enum swayc_layouts layout) {
	if (!ASSERT_NONNULL(child)) {
		return NULL;
	}
	swayc_t *cont = new_swayc(C_CONTAINER);

	sway_log(L_DEBUG, "creating container %p around %p", cont, child);

	cont->layout = layout;
	cont->width = child->width;
	cont->height = child->height;
	cont->x = child->x;
	cont->y = child->y;
	cont->visible = child->visible;

	/* Container inherits all of the workspace's children, layout and whatnot */
	if (child->type == C_WORKSPACE) {
		swayc_t *workspace = child;
		// reorder focus
		cont->focused = workspace->focused;
		workspace->focused = cont;
		// set all children's focus to the container
		int i;
		for (i = 0; i < workspace->children->length; ++i) {
			((swayc_t *)workspace->children->items[i])->parent = cont;
		}
		// Swap children
		list_t *tmp_list = workspace->children;
		workspace->children = cont->children;
		cont->children = tmp_list;
		// add container to workspace children
		add_child(workspace, cont);
		// give them proper layouts
		cont->layout = workspace->layout;
		workspace->layout = layout;
		set_focused_container_for(workspace, get_focused_view(workspace));
	} else { // Or is built around container
		swayc_t *parent = replace_child(child, cont);
		if (parent) {
			add_child(cont, child);
		}
	}
	return cont;
}
Example #23
swayc_t *new_workspace(swayc_t *output, const char *name) {
	if (!ASSERT_NONNULL(output)) {
		return NULL;
	}
	sway_log(L_DEBUG, "Added workspace %s for output %u", name, (unsigned int)output->handle);
	swayc_t *workspace = new_swayc(C_WORKSPACE);

	workspace->layout = L_HORIZ; // TODO: default layout
	workspace->x = output->x;
	workspace->y = output->y;
	workspace->width = output->width;
	workspace->height = output->height;
	workspace->name = strdup(name);
	workspace->visible = true;
	workspace->floating = create_list();

	add_child(output, workspace);
	return workspace;
}
Example #24
static bool ZbiTestAppend(void) {
    BEGIN_TEST;
    // Allocate an additional kExtraBytes at the end of the ZBI to test
    // appending.
    const size_t kExtraBytes = sizeof(zbi_header_t) + sizeof(kAppendRD);
    uint8_t* test_zbi = get_test_zbi_extra(kExtraBytes);
    uint8_t* reference_zbi = get_test_zbi();

    test_zbi_t* test_image = reinterpret_cast<test_zbi_t*>(test_zbi);
    test_zbi_t* reference_image = reinterpret_cast<test_zbi_t*>(reference_zbi);

    auto cleanup = fbl::MakeAutoCall([test_zbi, reference_zbi]() {
        free(test_zbi);
        free(reference_zbi);
    });

    ASSERT_NONNULL(test_zbi, "failed to alloc test image");
    ASSERT_NONNULL(reference_zbi, "failed to alloc reference image");

    const size_t kBufferSize = sizeof(test_zbi_t) + kExtraBytes;
    zbi::Zbi image(test_zbi, kBufferSize);

    zbi_result_t result = image.AppendSection(
        static_cast<uint32_t>(sizeof(kAppendRD)), // Length
        ZBI_TYPE_STORAGE_RAMDISK,                 // Type
        0,                                        // Extra
        0,                                        // Flags
        reinterpret_cast<const void*>(kAppendRD)  // Payload.
        );

    ASSERT_EQ(result, ZBI_RESULT_OK, "Append failed");

    // Make sure the image is valid.
    ASSERT_EQ(image.Check(nullptr), ZBI_RESULT_OK,
              "append produced an invalid image");

    // Verify the integrity of the data.
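    // The only expected difference in the original portion is the container
    // header's length (it grew by the appended section), so copy it into the
    // reference before comparing.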
    reference_image->header.length = test_image->header.length;
    ASSERT_EQ(memcmp(test_zbi, reference_zbi, sizeof(test_zbi_t)), 0,
              "Append corrupted image");

    END_TEST;
}
Example #25
swayc_t *destroy_workspace(swayc_t *workspace) {
	if (!ASSERT_NONNULL(workspace)) {
		return NULL;
	}
	// NOTE: This is called from elsewhere without checking children length.
	// TODO: move containers to other workspaces?
	// For now, just don't delete.

	// Do not destroy this if it's the last workspace on this output
	swayc_t *output = swayc_parent_by_type(workspace, C_OUTPUT);
	if (output && output->children->length == 1) {
		return NULL;
	}

	if (workspace->children->length == 0) {
		sway_log(L_DEBUG, "%s: '%s'", __func__, workspace->name);
		swayc_t *parent = workspace->parent;
		free_swayc(workspace);
		return parent;
	}
	return NULL;
}
Example #26
void ThreadGetList(THREADLIST* List)
{
    ASSERT_NONNULL(List);
    SHARED_ACQUIRE(LockThreads);

    //
    // This function converts the C++ std::unordered_map into a C-style THREADLIST.
    // It assumes that BridgeAlloc zeroes the returned buffer.
    //
    List->count = (int)threadList.size();
    List->list = nullptr;

    if(List->count <= 0)
        return;

    // Allocate C-style array
    List->list = (THREADALLINFO*)BridgeAlloc(List->count * sizeof(THREADALLINFO));

    // Fill out the list data
    int index = 0;

    for(auto & itr : threadList)
    {
        HANDLE threadHandle = itr.second.Handle;

        // Get the debugger's active thread index
        if(threadHandle == hActiveThread)
            List->CurrentThread = index;

        memcpy(&List->list[index].BasicInfo, &itr.second, sizeof(THREADINFO));

        List->list[index].ThreadCip = GetContextDataEx(threadHandle, UE_CIP);
        List->list[index].SuspendCount = ThreadGetSuspendCount(threadHandle);
        List->list[index].Priority = ThreadGetPriority(threadHandle);
        List->list[index].WaitReason = ThreadGetWaitReason(threadHandle);
        List->list[index].LastError = ThreadGetLastErrorTEB(itr.second.ThreadLocalBase);
        index++;
    }
}
Example #27
// Test that appending multiple sections to a ZBI works
static bool ZbiTestAppendMulti(void) {
    BEGIN_TEST;
    uint8_t* reference_zbi = get_test_zbi();
    ASSERT_NONNULL(reference_zbi);
    auto cleanup = fbl::MakeAutoCall([reference_zbi]() {
        free(reference_zbi);
    });

    alignas(ZBI_ALIGNMENT) uint8_t test_zbi[sizeof(test_zbi_t)];
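    // ZBI items are ZBI_ALIGNMENT-aligned, so keep the backing buffer aligned as well.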
    zbi_header_t* hdr = reinterpret_cast<zbi_header_t*>(test_zbi);

    // Create an empty container.
    init_zbi_header(hdr);
    hdr->type = ZBI_TYPE_CONTAINER;
    hdr->extra = ZBI_CONTAINER_MAGIC;
    hdr->length = 0;

    zbi::Zbi image(test_zbi, sizeof(test_zbi));

    ASSERT_EQ(image.Check(nullptr), ZBI_RESULT_OK);

    zbi_result_t result;

    result = image.AppendSection(sizeof(kTestCmdline), ZBI_TYPE_CMDLINE, 0, 0, kTestCmdline);
    ASSERT_EQ(result, ZBI_RESULT_OK);

    result = image.AppendSection(sizeof(kTestRD), ZBI_TYPE_STORAGE_RAMDISK, 0, 0, kTestRD);
    ASSERT_EQ(result, ZBI_RESULT_OK);

    result = image.AppendSection(sizeof(kTestBootfs), ZBI_TYPE_STORAGE_BOOTFS, 0, 0, kTestBootfs);
    ASSERT_EQ(result, ZBI_RESULT_OK);

    ASSERT_EQ(memcmp(reference_zbi, test_zbi, image.Length()), 0);

    END_TEST;
}
Example #28
static bool ralloc_pools_c_api_test(void) {
    BEGIN_TEST;

    // Make a pool for the bookkeeping.  Do not allow it to be very large.
    // Require that this succeeds; we will not be able to run the tests without
    // it.
    ralloc_pool_t* pool;
    ASSERT_EQ(ZX_OK, ralloc_create_pool(REGION_POOL_MAX_SIZE, &pool), "");
    ASSERT_NONNULL(pool, "");

    // Create an allocator.
    ralloc_allocator_t* alloc;
    ASSERT_EQ(ZX_OK, ralloc_create_allocator(&alloc), "");
    ASSERT_NONNULL(alloc, "");

    {
        // Make sure that it refuses to perform any operations because it has no
        // RegionPool assigned to it yet.
        const ralloc_region_t tmp = { .base = 0u, .size = 1u };
        const ralloc_region_t* out;

        EXPECT_EQ(ZX_ERR_BAD_STATE, ralloc_add_region(alloc, &tmp, false), "");
        EXPECT_EQ(ZX_ERR_BAD_STATE, ralloc_get_sized_region_ex(alloc, 1u, 1u, &out), "");
        EXPECT_EQ(ZX_ERR_BAD_STATE, ralloc_get_specific_region_ex(alloc, &tmp, &out), "");
        EXPECT_NULL(ralloc_get_sized_region(alloc, 1u, 1u), "");
        EXPECT_NULL(ralloc_get_specific_region(alloc, &tmp), "");
    }

    // Assign our pool to our allocator, but hold onto the pool for now.
    EXPECT_EQ(ZX_OK, ralloc_set_region_pool(alloc, pool), "");

    // Release our pool reference.  The allocator should be holding onto its own
    // reference at this point.
    ralloc_release_pool(pool);
    pool = NULL;

    // Add some regions to our allocator.
    for (size_t i = 0; i < countof(GOOD_REGIONS); ++i)
        EXPECT_EQ(ZX_OK, ralloc_add_region(alloc, &GOOD_REGIONS[i], false), "");

    // Make a new pool and try to assign it to the allocator.  This should fail
    // because the allocator is currently using resources from its currently
    // assigned pool.
    ASSERT_EQ(ZX_OK, ralloc_create_pool(REGION_POOL_MAX_SIZE, &pool), "");
    ASSERT_NONNULL(pool, "");
    EXPECT_EQ(ZX_ERR_BAD_STATE, ralloc_set_region_pool(alloc, pool), "");

    // Add a bunch of adjacent regions to our pool.  Try to add so many
    // that we would normally run out of bookkeeping space.  We should not
    // actually run out, however, because the regions should get merged as they
    // get added.
    {
        ralloc_region_t tmp = { .base = GOOD_MERGE_REGION_BASE,
                                .size = GOOD_MERGE_REGION_SIZE };
        for (size_t i = 0; i < OOM_RANGE_LIMIT; ++i) {
            ASSERT_EQ(ZX_OK, ralloc_add_region(alloc, &tmp, false), "");
            tmp.base += tmp.size;
        }
    }

    // Attempt (and fail) to add some bad regions (regions which overlap,
    // regions which wrap the address space)
    for (size_t i = 0; i < countof(BAD_REGIONS); ++i)
        EXPECT_EQ(ZX_ERR_INVALID_ARGS, ralloc_add_region(alloc, &BAD_REGIONS[i], false), "");

    // Force the region bookkeeping pool to run out of memory by adding more and
    // more regions until we eventually run out of room.  Make sure that the
    // regions are not adjacent, or the internal bookkeeping will just merge
    // them.
    {
        size_t i;
        ralloc_region_t tmp = { .base = BAD_MERGE_REGION_BASE,
                                .size = BAD_MERGE_REGION_SIZE };
        for (i = 0; i < OOM_RANGE_LIMIT; ++i) {
            zx_status_t res;

            res = ralloc_add_region(alloc, &tmp, false);
            if (res != ZX_OK) {
                EXPECT_EQ(ZX_ERR_NO_MEMORY, res, "");
                break;
            }

            tmp.base += tmp.size + 1;
        }

        EXPECT_LT(i, OOM_RANGE_LIMIT, "");
    }

    // Reset allocator.  All of the existing available regions we had previously
    // added will be returned to the pool.
    ralloc_reset_allocator(alloc);

    // Now assign the second pool to the allocator.  Now that the allocator is
    // no longer using any resources, this should succeed.
    EXPECT_EQ(ZX_OK, ralloc_set_region_pool(alloc, pool), "");

    // Release our pool reference.
    ralloc_release_pool(pool);

    // Destroy our allocator.
    ralloc_destroy_allocator(alloc);

    END_TEST;
}

static bool ralloc_by_size_c_api_test(void) {
    BEGIN_TEST;

    // Make a pool and attach it to an allocator.  Then add the test regions to it.
    ralloc_allocator_t* alloc = NULL;
    {
        ralloc_pool_t* pool;
        ASSERT_EQ(ZX_OK, ralloc_create_pool(REGION_POOL_MAX_SIZE, &pool), "");
        ASSERT_NONNULL(pool, "");

        // Create an allocator and add our region pool to it.
        ASSERT_EQ(ZX_OK, ralloc_create_allocator(&alloc), "");
        ASSERT_NONNULL(alloc, "");
        ASSERT_EQ(ZX_OK, ralloc_set_region_pool(alloc, pool), "");

        // Release our pool reference.  The allocator should be holding onto its own
        // reference at this point.
        ralloc_release_pool(pool);
    }

    for (size_t i = 0; i < countof(ALLOC_BY_SIZE_REGIONS); ++i)
        EXPECT_EQ(ZX_OK, ralloc_add_region(alloc, &ALLOC_BY_SIZE_REGIONS[i], false), "");

    // Run the alloc by size tests.  Hold onto the regions it allocates so they
    // can be cleaned up properly when the test finishes.
    const ralloc_region_t* regions[countof(ALLOC_BY_SIZE_TESTS)];
    memset(regions, 0, sizeof(regions));

    for (size_t i = 0; i < countof(ALLOC_BY_SIZE_TESTS); ++i) {
        const alloc_by_size_alloc_test_t* TEST = ALLOC_BY_SIZE_TESTS + i;
        zx_status_t res = ralloc_get_sized_region_ex(alloc,
                                                     TEST->size,
                                                     TEST->align,
                                                     regions + i);

        // Make sure we get the test result we were expecting.
        EXPECT_EQ(TEST->res, res, "");

        // If the allocation claimed to succeed, we should have gotten
        // back a non-null region.  Otherwise, we should have gotten a
        // null region back.
        if (res == ZX_OK) {
            ASSERT_NONNULL(regions[i], "");
        } else {
            EXPECT_NULL(regions[i], "");
        }

        // If the allocation succeeded, and we expected it to succeed,
        // the allocation should have come from the test region we
        // expect and be aligned in the way we asked.
        if ((res == ZX_OK) && (TEST->res == ZX_OK)) {
            ASSERT_LT(TEST->region, countof(ALLOC_BY_SIZE_REGIONS), "");
            EXPECT_TRUE(region_contains_region(ALLOC_BY_SIZE_REGIONS + TEST->region,
                                               regions[i]), "");
            EXPECT_EQ(0u, regions[i]->base & (TEST->align - 1), "");
        }
    }

    // Put the regions we have allocated back in the allocator.
    for (size_t i = 0; i < countof(regions); ++i)
        if (regions[i])
            ralloc_put_region(regions[i]);

    // Destroy our allocator.
    ralloc_destroy_allocator(alloc);

    END_TEST;
}