Example #1
pg_tbl_t* proc_create(char* virt_start, char* virt_end, uint32_t stack_size)
{
	//Allocate the page-table bookkeeping structure; 0x5000 bytes is
	//assumed to be large enough for pg_tbl_t
	pg_tbl_t* tbl = malloc(0x5000);
	
	if(!tbl)
		return (pg_tbl_t*)0;
	
	//Mapping granularity: the number of bytes covered by one page-table entry
	uint32_t stepsize = __plat_pg_tbl_maxentry();
	
	//Note: If this is changed, physical address calculations for setting up the pagetable have to be adjusted
	void* entry_phys = mem_phys_find_free(stepsize);
	if(!entry_phys)
	{
		free(tbl);
		return (pg_tbl_t*)0;
	}
	//The entries are accessed through the kernel's virtual mapping of the frame
	void* entry_loc = (char*)entry_phys + (uint32_t)PLATFORM_KERNEL_BASE;
	
	mem_phys_set(entry_phys, stepsize);
	
	pg_create(tbl, entry_loc, (size_t)PLATFORM_PROC_MAX_MEM);
	
	//Convert the image's virtual range to inclusive page indices
	uint32_t start_virt = ((uint32_t)virt_start)/stepsize;
	uint32_t end_virt = ((uint32_t)virt_end-1)/stepsize;
	uint32_t fail = 0;
	for(uint32_t i = start_virt; i<=end_virt && !fail; i++)
	{
		void* phys_addr = mem_phys_find_free(stepsize);
		if(phys_addr)
		{
			printf("proc: mapping %x to %x\r\n", (char*)(i*stepsize), phys_addr);
			pg_map(tbl, (char*)(i*stepsize), phys_addr , stepsize, 0, PERM_PRW_URW, 0, 0, 0);
			mem_phys_set(phys_addr , stepsize);
		}
		else
		{
			fail = 1;
			break;
		}
	}
	
	//The stack grows down from virt_start: map every page from
	//stack_end up to (but not including) the first image page
	uint32_t stack_end = (uint32_t)virt_start-stack_size;
	stack_end /= stepsize;
	for(uint32_t i=stack_end; i<start_virt && !fail; i++)
	{
		void* phys_addr = mem_phys_find_free(stepsize);
		if(phys_addr)
		{
			printf("stack: mapping %x to %x\r\n", (char*)(i*stepsize), phys_addr);
			pg_map(tbl, (char*)(i*stepsize), phys_addr , stepsize, 0, PERM_PRW_URW, 0, 0, 0);
			mem_phys_set(phys_addr , stepsize);
		}
		else
		{
			fail = 1;
			break;
		}
	}
	if(fail)
	{
		//TODO: also release the physical frames claimed above
		free(tbl);
		return (pg_tbl_t*)0;
	}
	
	return tbl;
}
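A hypothetical usage sketch (the load addresses, the stack size, and the panic() helper are invented for illustration; only proc_create() itself comes from the code above):

//Build an address space for a user image mapped at
//0x40000000..0x40100000 with a 16 KiB stack directly below it
pg_tbl_t* tbl = proc_create((char*)0x40000000, (char*)0x40100000, 0x4000);
if(!tbl)
	panic("proc_create failed");	//hypothetical error handler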
Example #2
/*!
   \brief Creates buffer around line.

   See also Vect_line_buffer().

   \param Points input line geometry
   \param da distance along major axis
   \param db distance along minor axis
   \param dalpha angle between the x axis and the major axis
   \param round make corners round
   \param caps add caps at line ends
   \param tol maximum distance between theoretical arc and output segments
   \param[out] oPoints output polygon outer border (ccw order)
   \param[out] inner_count number of holes
   \param[out] iPoints array of output polygon's holes (cw order)
 */
void Vect_line_buffer2(struct line_pnts *Points, double da, double db,
		       double dalpha, int round, int caps, double tol,
		       struct line_pnts **oPoints,
		       struct line_pnts ***iPoints, int *inner_count)
{
    struct planar_graph *pg;
    struct line_pnts *tPoints, *outer;
    struct line_pnts **isles;
    int isles_count = 0;
    int res, winding;
    int more = 8;
    int isles_allocated = 0;

    G_debug(2, "Vect_line_buffer2()");

    Vect_line_prune(Points);

    if (Points->n_points == 1) {
	Vect_point_buffer2(Points->x[0], Points->y[0], da, db,
			   dalpha, round, tol, oPoints);
	*iPoints = NULL;
	*inner_count = 0;	/* a point buffer has no holes */
	return;
    }

    /* initializations */
    tPoints = Vect_new_line_struct();
    isles = NULL;
    pg = pg_create(Points);

    /* outer contour */
    outer = Vect_new_line_struct();
    extract_outer_contour(pg, 0, outer);

    /* inner contours */
    res = extract_inner_contour(pg, &winding, tPoints);
    while (res != 0) {
	add_line_to_array(tPoints, &isles, &isles_count, &isles_allocated,
			  more);
	tPoints = Vect_new_line_struct();
	res = extract_inner_contour(pg, &winding, tPoints);
    }

    buffer_lines(outer, isles, isles_count, RIGHT_SIDE, da, db, dalpha, round,
		 caps, tol, oPoints, iPoints, inner_count);

    Vect_destroy_line_struct(tPoints);
    Vect_destroy_line_struct(outer);
    destroy_lines_array(isles, isles_count);
    pg_destroy_struct(pg);

    return;
}
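A hypothetical usage sketch of the function above (the coordinates, distances, and tolerance are invented; the cleanup assumes the isles array comes from G_realloc(), as in buffer_lines()):

struct line_pnts *line, *outer, **isles;
int n_isles, i;

line = Vect_new_line_struct();
Vect_append_point(line, 0.0, 0.0, 0.0);
Vect_append_point(line, 100.0, 0.0, 0.0);

/* da == db and dalpha == 0 give a circular buffer of radius 10 */
Vect_line_buffer2(line, 10.0, 10.0, 0.0, 1, 1, 0.1,
		  &outer, &isles, &n_isles);

/* ... use outer (ccw ring) and the n_isles hole rings ... */
Vect_destroy_line_struct(outer);
for (i = 0; i < n_isles; i++)
    Vect_destroy_line_struct(isles[i]);
G_free(isles);
Vect_destroy_line_struct(line);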
Example #3
static void po_join(grpc_exec_ctx *exec_ctx, polling_obj *a, polling_obj *b) {
  switch (po_cmp(a, b)) {
    case 0: /* same object: nothing to join */
      return;
    case 1: /* order the pair so the locks are always acquired consistently */
      GPR_SWAP(polling_obj *, a, b);
    /* fall through */
    case -1:
      gpr_mu_lock(&a->mu);
      gpr_mu_lock(&b->mu);

      if (a->group == NULL) {
        if (b->group == NULL) {
          polling_obj *initial_po[] = {a, b};
          pg_create(exec_ctx, initial_po, GPR_ARRAY_SIZE(initial_po));
          gpr_mu_unlock(&a->mu);
          gpr_mu_unlock(&b->mu);
        } else {
          polling_group *b_group = pg_ref(b->group);
          gpr_mu_unlock(&b->mu);
          gpr_mu_unlock(&a->mu);
          pg_join(exec_ctx, b_group, a);
        }
      } else if (b->group == NULL) {
        polling_group *a_group = pg_ref(a->group);
        gpr_mu_unlock(&a->mu);
        gpr_mu_unlock(&b->mu);
        pg_join(exec_ctx, a_group, b);
      } else if (a->group == b->group) {
        /* nothing to do */
        gpr_mu_unlock(&a->mu);
        gpr_mu_unlock(&b->mu);
      } else {
        polling_group *a_group = pg_ref(a->group);
        polling_group *b_group = pg_ref(b->group);
        gpr_mu_unlock(&a->mu);
        gpr_mu_unlock(&b->mu);
        pg_merge(exec_ctx, a_group, b_group);
      }
  }
}
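po_cmp() is not shown here. Judging from the GPR_SWAP() in the case 1 arm, its purpose is to impose a total order on the two objects so that the pair of locks is always taken in the same order, preventing deadlock between concurrent po_join() calls. A minimal sketch of such a comparator, assuming it orders by address (the real implementation may differ):

/* sketch only: order polling objects by address so that po_join()
   always locks the lower-addressed object first */
static int po_cmp(polling_obj *a, polling_obj *b) {
  if (a == b) return 0;
  return ((uintptr_t)a < (uintptr_t)b) ? -1 : 1;
}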
Example #4
/*
 * CMT class callback for a new CPU entering the system
 *
 * This routine operates on the CPU specific processor group data (for the CPU
 * being initialized). The argument "pgdata" is a reference to the CPU's PG
 * data to be constructed.
 *
 * cp->cpu_pg is used by the dispatcher to access the CPU's PG data, and at
 * this point in initialization it still references a "bootstrap" structure.
 * pg_cmt_cpu_init() and the routines it calls must therefore be careful to
 * operate only on the "pgdata" argument, and not on cp->cpu_pg.
 */
static void
pg_cmt_cpu_init(cpu_t *cp, cpu_pg_t *pgdata)
{
	pg_cmt_t	*pg;
	group_t		*cmt_pgs;
	int		levels, level;
	pghw_type_t	hw;
	pg_t		*pg_cache = NULL;
	pg_cmt_t	*cpu_cmt_hier[PGHW_NUM_COMPONENTS];
	lgrp_handle_t	lgrp_handle;
	cmt_lgrp_t	*lgrp;
	cmt_lineage_validation_t	lineage_status;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(pg_cpu_is_bootstrapped(cp));

	if (cmt_sched_disabled)
		return;

	/*
	 * A new CPU is coming into the system.
	 * Interrogate the platform to see if the CPU
	 * has any performance or efficiency relevant
	 * sharing relationships.
	 */
	cmt_pgs = &pgdata->cmt_pgs;
	pgdata->cmt_lineage = NULL;

	bzero(cpu_cmt_hier, sizeof (cpu_cmt_hier));
	levels = 0;
	for (hw = PGHW_START; hw < PGHW_NUM_COMPONENTS; hw++) {

		pg_cmt_policy_t	policy;

		/*
		 * We're only interested in the hw sharing relationships
		 * for which we know how to optimize.
		 */
		policy = pg_cmt_policy(hw);
		if (policy == CMT_NO_POLICY ||
		    pg_plat_hw_shared(cp, hw) == 0)
			continue;

		/*
		 * We will still create the PGs for hardware sharing
		 * relationships that have been blacklisted, but won't
		 * implement CMT thread placement optimizations against them.
		 */
		if (cmt_hw_blacklisted[hw] == 1)
			policy = CMT_NO_POLICY;

		/*
		 * Find (or create) the PG associated with
		 * the hw sharing relationship in which cp
		 * belongs.
		 *
		 * Determine if a suitable PG already
		 * exists, or if one needs to be created.
		 */
		pg = (pg_cmt_t *)pghw_place_cpu(cp, hw);
		if (pg == NULL) {
			/*
			 * Create a new one.
			 * Initialize the common...
			 */
			pg = (pg_cmt_t *)pg_create(pg_cmt_class_id);

			/* ... physical ... */
			pghw_init((pghw_t *)pg, cp, hw);

			/*
			 * ... and CMT specific portions of the
			 * structure.
			 */
			pg->cmt_policy = policy;

			/* CMT event callbacks */
			cmt_callback_init((pg_t *)pg);

			bitset_init(&pg->cmt_cpus_actv_set);
			group_create(&pg->cmt_cpus_actv);
		} else {
			ASSERT(IS_CMT_PG(pg));
		}

		/* Add the CPU to the PG */
		pg_cpu_add((pg_t *)pg, cp, pgdata);

		/*
		 * Ensure capacity of the active CPU group/bitset
		 */
		group_expand(&pg->cmt_cpus_actv,
		    GROUP_SIZE(&((pg_t *)pg)->pg_cpus));

		if (cp->cpu_seqid >=
		    bitset_capacity(&pg->cmt_cpus_actv_set)) {
			bitset_resize(&pg->cmt_cpus_actv_set,
			    cp->cpu_seqid + 1);
		}

		/*
		 * Build a lineage of CMT PGs for load balancing / coalescence
		 */
		if (policy & (CMT_BALANCE | CMT_COALESCE)) {
			cpu_cmt_hier[levels++] = pg;
		}

		/* Cache this for later */
		if (hw == PGHW_CACHE)
			pg_cache = (pg_t *)pg;
	}

	group_expand(cmt_pgs, levels);

	if (cmt_root == NULL)
		cmt_root = pg_cmt_lgrp_create(lgrp_plat_root_hand());

	/*
	 * Find the lgrp that encapsulates this CPU's CMT hierarchy
	 */
	lgrp_handle = lgrp_plat_cpu_to_hand(cp->cpu_id);
	if ((lgrp = pg_cmt_find_lgrp(lgrp_handle)) == NULL)
		lgrp = pg_cmt_lgrp_create(lgrp_handle);

	/*
	 * Sort the PGs in the lineage by number of CPUs, in ascending order
	 */
	pg_cmt_hier_sort(cpu_cmt_hier, levels);

	/*
	 * Examine the lineage and validate it.
	 * This routine will also try to fix the lineage along with the
	 * rest of the PG hierarchy should it detect an issue.
	 *
	 * If it returns anything other than VALID or REPAIRED, an
	 * unrecoverable error has occurred, and we cannot proceed.
	 */
	lineage_status = pg_cmt_lineage_validate(cpu_cmt_hier, &levels, pgdata);
	if ((lineage_status != CMT_LINEAGE_VALID) &&
	    (lineage_status != CMT_LINEAGE_REPAIRED)) {
		/*
		 * In the case of an unrecoverable error where CMT scheduling
		 * has been disabled, assert that the under construction CPU's
		 * PG data has an empty CMT load balancing lineage.
		 */
		ASSERT((cmt_sched_disabled == 0) ||
		    (GROUP_SIZE(&(pgdata->cmt_pgs)) == 0));
		return;
	}

	/*
	 * For existing PGs in the lineage, verify that the parent is
	 * correct, as the generation in the lineage may have changed
	 * as a result of the sorting. Start the traversal at the top
	 * of the lineage, moving down.
	 */
	for (level = levels - 1; level >= 0; ) {
		int reorg;

		reorg = 0;
		pg = cpu_cmt_hier[level];

		/*
		 * Promote PGs at an incorrect generation into place.
		 */
		while (pg->cmt_parent &&
		    pg->cmt_parent != cpu_cmt_hier[level + 1]) {
			cmt_hier_promote(pg, pgdata);
			reorg++;
		}
		if (reorg > 0)
			level = levels - 1;
		else
			level--;
	}

	/*
	 * For each of the PGs in the CPU's lineage:
	 *	- Add an entry in the CPU's sorted CMT PG group,
	 *	  which is used for top-down CMT load balancing.
	 *	- Tie the PG into the CMT hierarchy by connecting
	 *	  it to its parent and siblings.
	 */
	for (level = 0; level < levels; level++) {
		uint_t		children;
		int		err;

		pg = cpu_cmt_hier[level];
		err = group_add_at(cmt_pgs, pg, levels - level - 1);
		ASSERT(err == 0);

		if (level == 0)
			pgdata->cmt_lineage = (pg_t *)pg;

		if (pg->cmt_siblings != NULL) {
			/* Already initialized */
			ASSERT(pg->cmt_parent == NULL ||
			    pg->cmt_parent == cpu_cmt_hier[level + 1]);
			ASSERT(pg->cmt_siblings == &lgrp->cl_pgs ||
			    ((pg->cmt_parent != NULL) &&
			    pg->cmt_siblings == pg->cmt_parent->cmt_children));
			continue;
		}

		if ((level + 1) == levels) {
			pg->cmt_parent = NULL;

			pg->cmt_siblings = &lgrp->cl_pgs;
			children = ++lgrp->cl_npgs;
			if (cmt_root != lgrp)
				cmt_root->cl_npgs++;
		} else {
			pg->cmt_parent = cpu_cmt_hier[level + 1];

			/*
			 * A good parent keeps track of their children.
			 * The parent's children group is also the PG's
			 * siblings.
			 */
			if (pg->cmt_parent->cmt_children == NULL) {
				pg->cmt_parent->cmt_children =
				    kmem_zalloc(sizeof (group_t), KM_SLEEP);
				group_create(pg->cmt_parent->cmt_children);
			}
			pg->cmt_siblings = pg->cmt_parent->cmt_children;
			children = ++pg->cmt_parent->cmt_nchildren;
		}

		group_expand(pg->cmt_siblings, children);
		group_expand(&cmt_root->cl_pgs, cmt_root->cl_npgs);
	}

	/*
	 * Cache the chip and core IDs in the cpu_t->cpu_physid structure
	 * for fast lookups later.
	 */
	if (cp->cpu_physid) {
		cp->cpu_physid->cpu_chipid =
		    pg_plat_hw_instance_id(cp, PGHW_CHIP);
		cp->cpu_physid->cpu_coreid = pg_plat_get_core_id(cp);

		/*
		 * If this cpu has a PG representing shared cache, then set
		 * cpu_cacheid to that PG's logical id
		 */
		if (pg_cache)
			cp->cpu_physid->cpu_cacheid = pg_cache->pg_id;
	}

	/* CPU0 only initialization */
	if (is_cpu0) {
		is_cpu0 = 0;
		cpu0_lgrp = lgrp;
	}
}
Example #5
/* area_outer and area_isles[i] must be closed, non-self-intersecting lines.
   side: 0 - auto, 1 - right, -1 - left
 */
static void buffer_lines(struct line_pnts *area_outer, struct line_pnts **area_isles,
			 int isles_count, int side, double da, double db,
			 double dalpha, int round, int caps, double tol,
			 struct line_pnts **oPoints, struct line_pnts ***iPoints,
			 int *inner_count)
{
    struct planar_graph *pg2;
    struct line_pnts *sPoints, *cPoints;
    struct line_pnts **arrPoints;
    int i, count = 0;
    int res, winding;
    int auto_side;
    int more = 8;
    int allocated = 0;
    double px, py;

    G_debug(3, "buffer_lines()");

    auto_side = (side == 0);

    /* initializations */
    sPoints = Vect_new_line_struct();
    cPoints = Vect_new_line_struct();
    arrPoints = NULL;

    /* outer contour */
    G_debug(3, "    processing outer contour");
    *oPoints = Vect_new_line_struct();
    if (auto_side)
	side =
	    get_polygon_orientation(area_outer->x, area_outer->y,
				    area_outer->n_points -
				    1) ? LEFT_SIDE : RIGHT_SIDE;
    convolution_line(area_outer, da, db, dalpha, side, round, caps, tol,
		     sPoints);
    pg2 = pg_create(sPoints);
    extract_outer_contour(pg2, 0, *oPoints);
    res = extract_inner_contour(pg2, &winding, cPoints);
    while (res != 0) {
	if (winding == 0) {
	    int check_poly = 1;
	    double area_size;

	    dig_find_area_poly(cPoints, &area_size);
	    if (area_size == 0) {
		G_warning(_("zero area size"));
		check_poly = 0;
	    }
	    if (cPoints->x[0] != cPoints->x[cPoints->n_points - 1] ||
		cPoints->y[0] != cPoints->y[cPoints->n_points - 1]) {

		G_warning(_("Line was not closed"));
		check_poly = 0;
	    }

	    if (check_poly && !Vect_point_in_poly(cPoints->x[0], cPoints->y[0], area_outer)) {
		if (Vect_get_point_in_poly(cPoints, &px, &py) == 0) {
		    if (!point_in_buf(area_outer, px, py, da, db, dalpha)) {
			add_line_to_array(cPoints, &arrPoints, &count, &allocated,
					  more);
			cPoints = Vect_new_line_struct();
		    }
		}
		else {
		    G_warning(_("Vect_get_point_in_poly() failed"));
		}
	    }
	}
	res = extract_inner_contour(pg2, &winding, cPoints);
    }
    pg_destroy_struct(pg2);

    /* inner contours */
    G_debug(3, "    processing inner contours");
    for (i = 0; i < isles_count; i++) {
	if (auto_side)
	    side =
		get_polygon_orientation(area_isles[i]->x, area_isles[i]->y,
					area_isles[i]->n_points -
					1) ? RIGHT_SIDE : LEFT_SIDE;
	convolution_line(area_isles[i], da, db, dalpha, side, round, caps,
			 tol, sPoints);
	pg2 = pg_create(sPoints);
	extract_outer_contour(pg2, 0, cPoints);
	res = extract_inner_contour(pg2, &winding, cPoints);
	while (res != 0) {
	    if (winding == -1) {
		int check_poly = 1;
		double area_size;

		dig_find_area_poly(cPoints, &area_size);
		if (area_size == 0) {
		    G_warning(_("zero area size"));
		    check_poly = 0;
		}
		if (cPoints->x[0] != cPoints->x[cPoints->n_points - 1] ||
		    cPoints->y[0] != cPoints->y[cPoints->n_points - 1]) {

		    G_warning(_("Line was not closed"));
		    check_poly = 0;
		}

		/* we need to check if the area is inside the buffer.
		   I've simplified convolution_line() so that it runs faster;
		   however, that leads to occasional problems */
		if (check_poly && Vect_point_in_poly
		    (cPoints->x[0], cPoints->y[0], area_isles[i])) {
		    if (Vect_get_point_in_poly(cPoints, &px, &py) == 0) {
			if (!point_in_buf(area_isles[i], px, py, da, db, dalpha)) {
			    add_line_to_array(cPoints, &arrPoints, &count,
					      &allocated, more);
			    cPoints = Vect_new_line_struct();
			}
		    }
		    else {
			G_warning(_("Vect_get_point_in_poly() failed"));
		    }
		}
	    }
	    res = extract_inner_contour(pg2, &winding, cPoints);
	}
	pg_destroy_struct(pg2);
    }

    arrPoints = G_realloc(arrPoints, count * sizeof(struct line_pnts *));
    *inner_count = count;
    *iPoints = arrPoints;

    Vect_destroy_line_struct(sPoints);
    Vect_destroy_line_struct(cPoints);

    G_debug(3, "buffer_lines() ... done");

    return;
}
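get_polygon_orientation() is used above only to choose the buffering side; its body is not shown. A minimal sketch of such a test, assuming the usual convention that the sign of the shoelace (twice the signed) area distinguishes the two orientations (the actual GRASS helper may differ):

/* sketch only: nonzero when the ring (x[i], y[i]), i = 0..n-1,
   is counter-clockwise, judged by the sign of the shoelace area */
static int get_polygon_orientation(const double *x, const double *y, int n)
{
    double area2 = 0.0;
    int i, j;

    for (i = 0; i < n; i++) {
	j = (i + 1) % n;
	area2 += x[i] * y[j] - x[j] * y[i];
    }

    return area2 > 0.0;
}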