Example #1
0
static void pg_merge(grpc_exec_ctx *exec_ctx, polling_group *a,
                     polling_group *b) {
  /* Merge two polling groups: afterwards every polling_obj that was in |b|
     belongs to |a|, and |b| forwards to |a| via b->po.group.
     Consumes one ref to |a| and one ref to |b|. */
  for (;;) {
    /* Both arguments chased down to the same group: nothing to merge. */
    if (a == b) {
      pg_unref(a);
      pg_unref(b);
      return;
    }
    /* Always lock in address order to avoid deadlock with concurrent merges. */
    if (a > b) GPR_SWAP(polling_group *, a, b);
    gpr_mu_lock(&a->po.mu);
    gpr_mu_lock(&b->po.mu);
    /* If either group was itself merged into a newer group while we were
       unlocked, drop it and chase the newer group, then retry. */
    if (a->po.group != NULL) {
      polling_group *m2 = pg_ref(a->po.group);
      gpr_mu_unlock(&a->po.mu);
      gpr_mu_unlock(&b->po.mu);
      pg_unref(a);
      a = m2;
    } else if (b->po.group != NULL) {
      polling_group *m2 = pg_ref(b->po.group);
      gpr_mu_unlock(&a->po.mu);
      gpr_mu_unlock(&b->po.mu);
      pg_unref(b);
      b = m2;
    } else {
      break;
    }
  }
  /* Invariant here: both a->po.mu and b->po.mu are held, neither is stale. */
  /* Old group refs collected during the move; released only after all locks
     are dropped (NOTE(review): presumably so pg_unref cannot run under the
     po locks — confirm against pg_unref's implementation). */
  polling_group **unref = NULL;
  size_t unref_count = 0;
  size_t unref_cap = 0;
  b->po.group = a; /* b now forwards lookups to a */
  pg_broadcast(exec_ctx, a, b);
  pg_broadcast(exec_ctx, b, a);
  /* Move every polling_obj from b's intrusive ring onto a's ring. */
  while (b->po.next != &b->po) {
    polling_obj *po = b->po.next;
    gpr_mu_lock(&po->mu);
    /* Grow the deferred-unref array geometrically (min 8, x1.5). */
    if (unref_count == unref_cap) {
      unref_cap = GPR_MAX(8, 3 * unref_cap / 2);
      unref = gpr_realloc(unref, unref_cap * sizeof(*unref));
    }
    unref[unref_count++] = po->group;
    po->group = pg_ref(a);
    // unlink from b
    po->prev->next = po->next;
    po->next->prev = po->prev;
    // link to a
    po->next = &a->po;
    po->prev = a->po.prev;
    po->next->prev = po->prev->next = po;
    gpr_mu_unlock(&po->mu);
  }
  gpr_mu_unlock(&a->po.mu);
  gpr_mu_unlock(&b->po.mu);
  /* Locks released: now safe to drop the collected group refs. */
  for (size_t i = 0; i < unref_count; i++) {
    pg_unref(unref[i]);
  }
  gpr_free(unref);
  pg_unref(b); /* the ref this merge consumed; b stays reachable via po.group */
}
Example #2
0
static void pg_join(grpc_exec_ctx *exec_ctx, polling_group *pg,
                    polling_obj *po) {
  /* assumes neither pg nor po are locked; consumes one ref to pg */
  /* Add polling_obj |po| to group |pg|: notify |po| against every existing
     member, then link it into the group's intrusive ring.  If |po| picks up
     a group concurrently, bail out into a full pg_merge instead. */
  pg = pg_lock_latest(pg);
  /* pg locked */
  for (polling_obj *existing = pg->po.next /* skip pg - it's just a stub */;
       existing != &pg->po; existing = existing->next) {
    /* Lock |po| and |existing| in po_cmp order to avoid deadlock. */
    if (po_cmp(po, existing) < 0) {
      gpr_mu_lock(&po->mu);
      gpr_mu_lock(&existing->mu);
    } else {
      GPR_ASSERT(po_cmp(po, existing) != 0);
      gpr_mu_lock(&existing->mu);
      gpr_mu_lock(&po->mu);
    }
    /* pg, po, existing locked */
    if (po->group != NULL) {
      gpr_mu_unlock(&pg->po.mu);
      polling_group *po_group = pg_ref(po->group);
      gpr_mu_unlock(&po->mu);
      gpr_mu_unlock(&existing->mu);
      /* pg_merge consumes the ref to pg we were given, plus po_group's ref */
      pg_merge(exec_ctx, pg, po_group);
      /* early exit: polling obj picked up a group during joining: we needed
         to do a full merge */
      return;
    }
    pg_notify(exec_ctx, po, existing);
    gpr_mu_unlock(&po->mu);
    gpr_mu_unlock(&existing->mu);
  }
  /* Re-check under po's lock before linking: po may have joined a group
     while we iterated with po unlocked. */
  gpr_mu_lock(&po->mu);
  if (po->group != NULL) {
    gpr_mu_unlock(&pg->po.mu);
    polling_group *po_group = pg_ref(po->group);
    gpr_mu_unlock(&po->mu);
    pg_merge(exec_ctx, pg, po_group);
    /* early exit: polling obj picked up a group during joining: we needed
       to do a full merge */
    return;
  }
  /* Link po at the tail of pg's ring; po takes over the ref to pg. */
  po->group = pg;
  po->next = &pg->po;
  po->prev = pg->po.prev;
  po->prev->next = po->next->prev = po;
  gpr_mu_unlock(&pg->po.mu);
  gpr_mu_unlock(&po->mu);
}
Example #3
0
static void po_join(grpc_exec_ctx *exec_ctx, polling_obj *a, polling_obj *b) {
  /* Ensure polling objects |a| and |b| end up in the same polling group,
     creating, joining, or merging groups as required. */
  switch (po_cmp(a, b)) {
    case 0:
      /* same object: nothing to join */
      return;
    case 1:
      /* normalize so that a orders before b: fixes the lock order below */
      GPR_SWAP(polling_obj *, a, b);
    /* fall through */
    case -1:
      gpr_mu_lock(&a->mu);
      gpr_mu_lock(&b->mu);

      if (a->group == NULL) {
        if (b->group == NULL) {
          /* neither has a group yet: create one containing both */
          polling_obj *initial_po[] = {a, b};
          pg_create(exec_ctx, initial_po, GPR_ARRAY_SIZE(initial_po));
          gpr_mu_unlock(&a->mu);
          gpr_mu_unlock(&b->mu);
        } else {
          /* only b is grouped: add a into b's group */
          polling_group *b_group = pg_ref(b->group);
          gpr_mu_unlock(&b->mu);
          gpr_mu_unlock(&a->mu);
          pg_join(exec_ctx, b_group, a);
        }
      } else if (b->group == NULL) {
        /* only a is grouped: add b into a's group */
        polling_group *a_group = pg_ref(a->group);
        gpr_mu_unlock(&a->mu);
        gpr_mu_unlock(&b->mu);
        pg_join(exec_ctx, a_group, b);
      } else if (a->group == b->group) {
        /* nothing to do */
        gpr_mu_unlock(&a->mu);
        gpr_mu_unlock(&b->mu);
      } else {
        /* distinct groups on both sides: merge them */
        polling_group *a_group = pg_ref(a->group);
        polling_group *b_group = pg_ref(b->group);
        gpr_mu_unlock(&a->mu);
        gpr_mu_unlock(&b->mu);
        pg_merge(exec_ctx, a_group, b_group);
      }
  }
}
Example #4
0
static polling_group *pg_lock_latest(polling_group *pg) {
  /* assumes pg unlocked; consumes ref, returns ref */
  /* Chase the chain of superseded groups (po.group links) until we hold the
     lock on a group that has not itself been merged into a newer one. */
  gpr_mu_lock(&pg->po.mu);
  for (;;) {
    polling_group *newer = pg->po.group;
    if (newer == NULL) {
      return pg; /* pg is the latest; returned locked, with our ref */
    }
    newer = pg_ref(newer);
    gpr_mu_unlock(&pg->po.mu);
    pg_unref(pg);
    pg = newer;
    gpr_mu_lock(&pg->po.mu);
  }
}
Example #5
0
File: pagein.c  Project: bochf/testing
int pgin_all(my_region_t *Caller_Supplied_Regions,  int lock_in_memory)
{
	/* Touch ("page in") every page of this process's text, data, loader /
	 * shared-library and caller-supplied regions so they become resident,
	 * recording stats checkpoints along the way.  When lock_in_memory is
	 * non-zero, mlockall() is attempted first as a fast path.
	 * Returns 0 on success, EXIT_FAILURE if loadquery() fails. */
	pid_t mypid=getpid();
	/* For data (heap) section */
	void *my_edata;
	/* For Loader / Shared lib section */
	struct ld_xinfo *p_ld_xinfo;
	char *ar;
	char ldbuf[LOADQUERY_BUFFER];
	/* For stats */
	uint64_t pg_cnt = 0;	/* FIX: was uninitialized; read by the mlockall
				 * fast-path checkpoint below before any pg_ref */
	struct procentry64 p[6];

	/* Initial stats checkpoint */
	stats_checkpoint_initial(&p[0]);

 	/*****************************
	 * FAST PAGE-IN DIRECTIVE    *
 	 *****************************
	 */
	 if (lock_in_memory)
	 {
	 	/* Shortcut ! */
	 	if (mlockall(MCL_CURRENT|MCL_FUTURE) >= 0)
		{
			/* Great.  mlockall forces the page-ins across the address space.  Page-outs will be resisted.  OK for a small memory size.
			 * For a large memory size relative to the host system, don't lock... use lock_in_memory=0. */
			printf ("MLOCKALL\n");
			stats_checkpoint(mypid, pg_cnt, &p[0], &p[1]);
			return 0;
		}
		else
	 		fprintf(stderr,"Page locking in memory was requested.  mlockall returned %s, error code %d.  Continuing...\n",strerror(errno),errno);
			/* Fall through into the soft handler */
	}
 
 	/*****************************
	 * MANUAL PAGE-IN ALGORITHM  *
 	 *****************************
	 */

	/* Text region */
	/* The Loader section will also cover text */
	printf ("TEXT: pg_ref(_text=0x%p, _etext=0x%p, %d, %d)\n", &_text, &_etext, 0, PAGE_TOUCH_RDONLY);
	pg_cnt=pg_ref(&_text, &_etext, 0, PAGE_TOUCH_RDONLY);

	stats_checkpoint(mypid, pg_cnt, &p[0], &p[1]);



	/* Data region */
	my_edata=sbrk(0);
	printf ("DATA: pg_ref(_data=0x%p, _edata=0x%p, %d, %d)\n", &_data, my_edata, 0, PAGE_TOUCH_RDONLY);
	/* FIX: capture the count; previously it was discarded and the stale
	 * text-region count was reported at the next checkpoint. */
	pg_cnt=pg_ref(&_data, my_edata, 0, PAGE_TOUCH_RDONLY);    /* Multi-threaded: read-only- not thread-safe  */
	/*pg_cnt=pg_ref(&_data, my_edata, 0, PAGE_TOUCH_RDWR);*/  /* Single-threaded: read-write */

	stats_checkpoint(mypid, pg_cnt, &p[1], &p[2]);



	/* Loader / Shared Library text & data regions */
	if ((loadquery(L_GETXINFO, (void*)ldbuf, sizeof(ldbuf)))<0)
	{
		if (errno==ENOMEM)
	 		fprintf(stderr,"loadquery returned %s, error code %d.  Increase the static buffers or recode to make it dynamic.\n",strerror(errno),errno);
		else
	 		fprintf(stderr,"loadquery returned %s, error code %d.\n",strerror(errno),errno);
		return EXIT_FAILURE;
	}

	/* Walk the ld_xinfo records; ldinfo_next is a byte offset from the
	 * current record (0 terminates the list).
	 * FIX: arithmetic on char* instead of void* (void* arithmetic is a GNU
	 * extension, not standard C). */
	for (p_ld_xinfo=(struct ld_xinfo*) ldbuf;  p_ld_xinfo;  p_ld_xinfo = (p_ld_xinfo->ldinfo_next ? (struct ld_xinfo*)((char*)p_ld_xinfo + p_ld_xinfo->ldinfo_next) : NULL))
	{
		ar = (char*)p_ld_xinfo + p_ld_xinfo->ldinfo_filename;
		printf ("%s:  file=\"%s\"  member=\"%s\"  text=0x%015" PRIX64 "  text_len=0x%016" PRIX64 ",  data=0x%015" PRIX64 "  data_len=0x%016" PRIX64 ",  tdata=0x%015" PRIX64 "  tdata_len=0x%016" PRIX64 ",  tbss_len=0x%016" PRIX64 "\n",
		 "LOAD+SHLIB",
		 ar,
		 ar + strlen(ar) + 1,			/* member name follows the file name */
		 (ptr64_t)p_ld_xinfo->ldinfo_textorg,
		 (uint64_t)p_ld_xinfo->ldinfo_textsize,
		 (ptr64_t)p_ld_xinfo->ldinfo_dataorg,
		 (uint64_t)p_ld_xinfo->ldinfo_datasize,
		 (ptr64_t)p_ld_xinfo->ldinfo_tdataorg,
		 (uint64_t)p_ld_xinfo->ldinfo_tdatasize,
		 (uint64_t)p_ld_xinfo->ldinfo_tbsssize);
		printf ("text\n");
		pg_cnt=pg_ref((void*)(p_ld_xinfo->ldinfo_textorg), (char*)(p_ld_xinfo->ldinfo_textorg) + p_ld_xinfo->ldinfo_textsize, 0, PAGE_TOUCH_RDONLY);
		stats_checkpoint(mypid, pg_cnt, &p[2], &p[3]);

		printf ("data\n");
		pg_cnt=pg_ref((void*)(p_ld_xinfo->ldinfo_dataorg), (char*)(p_ld_xinfo->ldinfo_dataorg) + p_ld_xinfo->ldinfo_datasize, 0, PAGE_TOUCH_RDONLY);
		stats_checkpoint(mypid, pg_cnt, &p[3], &p[4]);
		
		memcpy(&p[2],&p[4],sizeof(p[2])); /* Reinit p[2] for next loop */
	}
	


	/* Stack */
	/* Skipping this - low quantity / low value */
	;



	/* Caller Supplied Regions
	 * Includes shared memory, i.e. shmget/shmat and mmap
	 * There is no C API method to derive shared memory mappings using
	 * public interfaces, up to the time of writing, July 2014 / AIX 7.1 TL3.
	 * svmon -P <pid> & procmap -S <pid>  derive this using the 
	 * private, undocumented kernel performance extensions, specifically 
	 * getvsidsandprocl_pid() and ptx_getsegstat().
	 * Until IBM supplies a public API or guidance on the private interfaces
	 * these cannot be derived.   No consideration is given to calling out
	 * externally to svmon, because this call is way too slow.
	 *
	 * In the meantime, the caller can identify the shared segments to map.
	 */
	 if (Caller_Supplied_Regions)
	 {
	 	int i;
	 	/* Region list is terminated by a NULL .addr */
	 	for (i=0; Caller_Supplied_Regions[i].addr; i++)
		{
			printf("CALLER SUPPLIED REGION #%d:  0x%p - 0x%p\n", i, (void*)(Caller_Supplied_Regions[i].addr), (char*)(Caller_Supplied_Regions[i].addr) + Caller_Supplied_Regions[i].size);
			pg_cnt=pg_ref((void*)(Caller_Supplied_Regions[i].addr), (char*)(Caller_Supplied_Regions[i].addr) + Caller_Supplied_Regions[i].size, 0, PAGE_TOUCH_RDONLY);

			stats_checkpoint(mypid, pg_cnt, &p[4], &p[5]);
			memcpy(&p[4],&p[5],sizeof(p[4])); /* Reinit p[4] for next loop */
		}
	 }

	/* FIX: function is declared int but previously fell off the end here —
	 * undefined behavior if the caller uses the return value. */
	return 0;
}