Example #1
/**
 * vos_mem_print_stack_trace() - Print saved stack trace
 * @mem_struct: Pointer to the memory structure which has the saved stack trace
 *              to be printed
 *
 * Return: None
 */
static inline void vos_mem_print_stack_trace(struct s_vos_mem_struct* mem_struct)
{
	VOS_TRACE(VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_FATAL,
		  "Call stack for the source of leaked memory:");

	print_stack_trace(&mem_struct->trace, 1);
}
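
The structure passed in already carries a saved trace; the code that records it is not part of this example. Below is a minimal sketch of the matching save-then-print pattern with the legacy kernel stack_trace API, assuming a hypothetical holder struct (the real s_vos_mem_struct layout is not shown here):

#include <linux/stacktrace.h>

#define LEAK_TRACE_DEPTH 16

/* Hypothetical holder standing in for s_vos_mem_struct. */
struct leak_record {
	unsigned long entries[LEAK_TRACE_DEPTH];
	struct stack_trace trace;
};

/* Capture the caller's stack when the block is allocated... */
static void leak_record_save(struct leak_record *rec)
{
	rec->trace.nr_entries = 0;
	rec->trace.max_entries = LEAK_TRACE_DEPTH;
	rec->trace.entries = rec->entries;
	rec->trace.skip = 2;	/* skip the allocation helpers themselves */
	save_stack_trace(&rec->trace);
}

/* ...and print it when the block is later reported as leaked, exactly as
 * vos_mem_print_stack_trace() does above (second argument = indent spaces). */
static void leak_record_print(struct leak_record *rec)
{
	print_stack_trace(&rec->trace, 1);
}
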
Example #2
static bool do_daemon_stack_trace(struct tevent_context *ev_ctx,
				  struct messaging_context *msg_ctx,
				  const struct server_id pid,
				  const int argc, const char **argv)
{
	pid_t	dest;
	int	count = 0;

	if (argc != 1) {
		fprintf(stderr, "Usage: smbcontrol <dest> stacktrace\n");
		return False;
	}

	dest = procid_to_pid(&pid);

	if (dest != 0) {
		/* It would be nice to be able to make sure that this PID is
		 * the PID of a smbd/winbind/nmbd process, not some random PID
		 * the user liked the look of. It doesn't seem like it's worth
		 * the effort at the moment, however.
		 */
		print_stack_trace(dest, &count);
	} else {
		connections_forall_read(stack_trace_connection, &count);
	}

	return True;
}
Example #3
void
tracing_print_stack_trace(struct tracing_stack_trace* stackTrace)
{
#if ENABLE_TRACING
	print_stack_trace(stackTrace, kprintf);
#endif
}
Example #4
/*
** Allocate new memory and set it to zero.  Return NULL if
** no memory is available.
*/
void *sqlite3Malloc_(int n, int bZero, char *zFile, int line){
  void *p;
  int *pi;
  int i, k;
  if( n==0 ){
    return 0;
  }
  if( simulatedMallocFailure(n, zFile, line) ){
    return 0;
  }
  sqlite3_memUsed += n;
  if( sqlite3_memMax<sqlite3_memUsed ) sqlite3_memMax = sqlite3_memUsed;
  k = (n+sizeof(int)-1)/sizeof(int);
  pi = malloc( (N_GUARD*2+1+k)*sizeof(int));
  if( pi==0 ){
    if( n>0 ) sqlite3_malloc_failed++;
    return 0;
  }
  sqlite3_nMalloc++;
  for(i=0; i<N_GUARD; i++) pi[i] = 0xdead1122;
  pi[N_GUARD] = n;
  for(i=0; i<N_GUARD; i++) pi[k+1+N_GUARD+i] = 0xdead3344;
  p = &pi[N_GUARD+1];
  memset(p, bZero==0, n);
#if SQLITE_MEMDEBUG>1
  print_stack_trace();
  fprintf(stderr,"%06d malloc %d bytes at 0x%x from %s:%d\n",
      ++memcnt, n, (int)p, zFile,line);
#endif
  return p;
}
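
These debug wrappers are normally reached through thin convenience macros rather than called directly, so every call site's file and line reach the allocator. A minimal usage sketch follows, assuming sqliteMalloc-style wrapper macros (the exact macro names used by the surrounding SQLite build are an assumption):

/* Hypothetical wrappers in the spirit of SQLite's debug build: forward the
 * call site so the allocator can log "malloc ... from file:line". */
#define sqliteMalloc(n)     sqlite3Malloc_((n), 1, __FILE__, __LINE__)
#define sqliteMallocRaw(n)  sqlite3Malloc_((n), 0, __FILE__, __LINE__)
#define sqliteFree(p)       sqlite3Free_((p), __FILE__, __LINE__)

static void usage_example(void){
  char *z = sqliteMalloc(64);      /* zeroed, guarded allocation */
  if( z==0 ) return;               /* real or simulated allocation failure */
  /* ... use z; the 0xdead1122/0xdead3344 guard words around the block
   * catch under- and overruns when it is freed or reallocated ... */
  sqliteFree(z);
}
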
Example #5
void
TraceOutput::PrintStackTrace(tracing_stack_trace* stackTrace)
{
#if ENABLE_TRACING
	print_stack_trace(stackTrace, TraceOutputPrint(*this));
	if (stackTrace == NULL || stackTrace->depth <= 0)
		return;

	for (int32 i = 0; i < stackTrace->depth; i++) {
		addr_t address = stackTrace->return_addresses[i];

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			Print("  %p  %s + 0x%lx (%s)%s\n", (void*)address, symbol,
				address - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			Print("  %p\n", (void*)address);
	}
#endif
}
Example #6
static 
void 
linux_sigsegv_handler(
    int UNUSED_POST(signum), 
    siginfo_t* sidata, 
    void* ctx) 
{
  ucontext_t* ctxptr = ctx;
  char buff_msg[1024];

  int bytecount = snprintf(buff_msg,
                           _countof(buff_msg),
                           "\nSIGSEGV received, faulting address %#08x, errno %d"
                           "\nDumping registers :"
                           "\neip = %#08x"
                           "\neax = %#08x"
                           "\nebx = %#08x"
                           "\necx = %#08x"
                           "\nedx = %#08x"
                           "\nebp = %#08x"
                           "\nesp = %#08x",
                           (uintptr_t) sidata->si_addr,
                           sidata->si_errno,
                           ctxptr->uc_mcontext.gregs[REG_EIP],
                           ctxptr->uc_mcontext.gregs[REG_EAX],
                           ctxptr->uc_mcontext.gregs[REG_EBX],
                           ctxptr->uc_mcontext.gregs[REG_ECX],
                           ctxptr->uc_mcontext.gregs[REG_EDX],
                           ctxptr->uc_mcontext.gregs[REG_EBP],
                           ctxptr->uc_mcontext.gregs[REG_ESP]);
  write(STDERR_FILENO, buff_msg, bytecount);
  print_stack_trace();
  exit(EXIT_FAILURE);
}
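
A handler with this three-argument signature only receives the siginfo_t and ucontext_t pointers if it is installed with SA_SIGINFO. A minimal registration sketch using standard POSIX sigaction (it must live in the same file, since the handler above is static):

#include <signal.h>
#include <string.h>

/* Install linux_sigsegv_handler so the kernel passes the siginfo_t and
 * ucontext_t arguments dumped above. Returns 0 on success, -1 on error. */
static int install_sigsegv_handler(void)
{
  struct sigaction sa;

  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = linux_sigsegv_handler;
  sa.sa_flags = SA_SIGINFO;
  sigemptyset(&sa.sa_mask);

  return sigaction(SIGSEGV, &sa, NULL);
}
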
Example #7
/* html env may be null */
static void print_freed_chunk_info(struct chunk *c,
				   struct hax *before, struct hax *after,
				   struct fab_html_env *html_env)
{
	char allocated_msg[BUF_SIZE];
	char freed_msg[BUF_SIZE];
	unsigned int pos = 0;

	scnprintf(allocated_msg, BUF_SIZE, "Heap block [0x%x | %d] "
		  "was allocated at:", c->base, c->len);

	pos += scnprintf(freed_msg + pos, BUF_SIZE - pos,
			 "...and, between preemptions ");
	if (after == NULL) {
		pos += scnprintf(freed_msg + pos, BUF_SIZE - pos, "[root]");
	} else {
		pos += scnprintf(freed_msg + pos, BUF_SIZE - pos, "#%d/tid%d",
				 after->depth, after->chosen_thread);
	}
	pos += scnprintf(freed_msg + pos, BUF_SIZE - pos, " and ");
	if (before == NULL) {
		pos += scnprintf(freed_msg + pos, BUF_SIZE - pos, "[latest]");
	} else {
		pos += scnprintf(freed_msg + pos, BUF_SIZE - pos, "#%d/tid%d",
				 before->depth, before->chosen_thread);
	}
	pos += scnprintf(freed_msg + pos, BUF_SIZE - pos, ", freed at:");

	if (html_env == NULL) {
		lsprintf(BUG, "%s", allocated_msg);
		print_stack_trace(BUG, c->malloc_trace);
		printf(BUG, "\n");
		lsprintf(BUG, "%s", freed_msg);
		print_stack_trace(BUG, c->free_trace);
		printf(BUG, "\n");
	} else {
		HTML_PRINTF(html_env, "%s" HTML_NEWLINE, allocated_msg);
		HTML_PRINTF(html_env, "TID %d at:" HTML_NEWLINE, c->malloc_trace->tid);
		HTML_PRINT_STACK_TRACE(html_env, c->malloc_trace);
		HTML_PRINTF(html_env, HTML_NEWLINE HTML_NEWLINE);
		HTML_PRINTF(html_env, "%s" HTML_NEWLINE, freed_msg);
		HTML_PRINTF(html_env, "TID %d at:" HTML_NEWLINE, c->free_trace->tid);
		HTML_PRINT_STACK_TRACE(html_env, c->free_trace);
		HTML_PRINTF(html_env, HTML_NEWLINE);
	}
}
Example #8
static int stack_trace_connection(const struct connections_key *key,
				  const struct connections_data *crec,
				  void *priv)
{
	print_stack_trace(procid_to_pid(&crec->pid), (int *)priv);

	return 0;
}
Example #9
/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warning("Mapped at:\n");
		print_stack_trace(&entry->stacktrace, 0);
	}
#endif
}
Example #10
static int stack_trace_server(const struct server_id *id,
			      uint32_t msg_flags,
			      void *priv)
{
	if (procid_is_local(id)) {
		print_stack_trace(procid_to_pid(id), (int *)priv);
	}
	return 0;
}
Example #11
void kmemcheck_error_recall(void)
{
	static const char *desc[] = {
		[KMEMCHECK_SHADOW_UNALLOCATED]		= "unallocated",
		[KMEMCHECK_SHADOW_UNINITIALIZED]	= "uninitialized",
		[KMEMCHECK_SHADOW_INITIALIZED]		= "initialized",
		[KMEMCHECK_SHADOW_FREED]		= "freed",
	};

	static const char short_desc[] = {
		[KMEMCHECK_SHADOW_UNALLOCATED]		= 'a',
		[KMEMCHECK_SHADOW_UNINITIALIZED]	= 'u',
		[KMEMCHECK_SHADOW_INITIALIZED]		= 'i',
		[KMEMCHECK_SHADOW_FREED]		= 'f',
	};

	struct kmemcheck_error *e;
	unsigned int i;

	e = error_next_rd();
	if (!e)
		return;

	switch (e->type) {
	case KMEMCHECK_ERROR_INVALID_ACCESS:
		printk(KERN_ERR  "WARNING: kmemcheck: Caught %d-bit read "
			"from %s memory (%p)\n",
			8 * e->size, e->state < ARRAY_SIZE(desc) ?
				desc[e->state] : "(invalid shadow state)",
			(void *) e->address);

		printk(KERN_INFO);
		for (i = 0; i < SHADOW_COPY_SIZE; ++i)
			printk("%02x", e->memory_copy[i]);
		printk("\n");

		printk(KERN_INFO);
		for (i = 0; i < SHADOW_COPY_SIZE; ++i) {
			if (e->shadow_copy[i] < ARRAY_SIZE(short_desc))
				printk(" %c", short_desc[e->shadow_copy[i]]);
			else
				printk(" ?");
		}
		printk("\n");
		printk(KERN_INFO "%*c\n", 2 + 2
			* (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^');
		break;
	case KMEMCHECK_ERROR_BUG:
		printk(KERN_EMERG "ERROR: kmemcheck: Fatal error\n");
		break;
	}

	__show_regs(&e->regs, 1);
	print_stack_trace(&e->trace, 0);
}
Example #12
/*
** Resize a prior allocation.  If p==0, then this routine
** works just like sqliteMalloc().  If n==0, then this routine
** works just like sqliteFree().
*/
void *sqlite3Realloc_(void *oldP, int n, char *zFile, int line){
  int *oldPi, *pi, i, k, oldN, oldK;
  void *p;
  if( oldP==0 ){
    return sqlite3Malloc_(n,1,zFile,line);
  }
  if( n==0 ){
    sqlite3Free_(oldP,zFile,line);
    return 0;
  }
  if( simulatedMallocFailure(n, zFile, line) ){
    return 0;
  }
  oldPi = oldP;
  oldPi -= N_GUARD+1;
  if( oldPi[0]!=0xdead1122 ){
    fprintf(stderr,"Low-end memory corruption in realloc at 0x%x\n", (int)oldP);
    return 0;
  }
  oldN = oldPi[N_GUARD];
  sqlite3_memUsed -= oldN;
  oldK = (oldN+sizeof(int)-1)/sizeof(int);
  for(i=0; i<N_GUARD; i++){
    if( oldPi[oldK+N_GUARD+1+i]!=0xdead3344 ){
      fprintf(stderr,"High-end memory corruption in realloc at 0x%x\n",
              (int)oldP);
      return 0;
    }
  }
  k = (n + sizeof(int) - 1)/sizeof(int);
  pi = malloc( (k+N_GUARD*2+1)*sizeof(int) );
  if( pi==0 ){
    if( n>0 ) sqlite3_malloc_failed++;
    return 0;
  }
  for(i=0; i<N_GUARD; i++) pi[i] = 0xdead1122;
  pi[N_GUARD] = n;
  sqlite3_memUsed += n;
  if( sqlite3_memMax<sqlite3_memUsed ) sqlite3_memMax = sqlite3_memUsed;
  for(i=0; i<N_GUARD; i++) pi[k+N_GUARD+1+i] = 0xdead3344;
  p = &pi[N_GUARD+1];
  memcpy(p, oldP, n>oldN ? oldN : n);
  if( n>oldN ){
    memset(&((char*)p)[oldN], 0x55, n-oldN);
  }
  memset(oldPi, 0xab, (oldK+N_GUARD+2)*sizeof(int));
  free(oldPi);
#if SQLITE_MEMDEBUG>1
  print_stack_trace();
  fprintf(stderr,"%06d realloc %d to %d bytes at 0x%x to 0x%x at %s:%d\n",
    ++memcnt, oldN, n, (int)oldP, (int)p, zFile, line);
#endif
  return p;
}
Example #13
File: report.c  Project: Lyude/linux
static void print_track(struct kasan_track *track, const char *prefix)
{
	pr_err("%s by task %u:\n", prefix, track->pid);
	if (track->stack) {
		struct stack_trace trace;

		depot_fetch_stack(track->stack, &trace);
		print_stack_trace(&trace, 0);
	} else {
		pr_err("(stack is not available)\n");
	}
}
Example #14
static void print_track(struct kasan_track *track)
{
	pr_err("PID = %u\n", track->pid);
	if (track->stack) {
		struct stack_trace trace;

		depot_fetch_stack(track->stack, &trace);
		print_stack_trace(&trace, 0);
	} else {
		pr_err("(stack is not available)\n");
	}
}
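
In both KASAN variants above, the track only stores a compact stack depot handle; the stack is expanded into a temporary stack_trace just before printing. Here is a rough sketch of the recording side under the same-era stackdepot API, assuming depot_save_stack() is available as it was in kernels of this vintage (the depth constant and function name are illustrative):

#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

#define TRACK_STACK_DEPTH 64

/* Illustrative recording side: capture the current stack and compress it into
 * a depot handle, which print_track() above later expands with
 * depot_fetch_stack() and prints with print_stack_trace(). */
static depot_stack_handle_t record_stack(gfp_t flags)
{
	unsigned long entries[TRACK_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = TRACK_STACK_DEPTH,
		.skip = 0,
	};

	save_stack_trace(&trace);
	return depot_save_stack(&trace, flags);
}
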
Example #15
void backtrace_test_saved(void)
{
	struct stack_trace trace;
	unsigned long entries[8];

	printk("\nThe following trace is a kernel self test and not a bug!\n");

	trace.nr_entries = 0;
	trace.max_entries = ARRAY_SIZE(entries);
	trace.entries = entries;
	trace.skip = 0;

	printk("Testing a dump_stack.\n");
	dump_stack();
	//printk("Testing a save_stack_trace.\n");
	//save_stack_trace(&trace);
	printk("Testing a print_stack_trace.\n");
	print_stack_trace(&trace, 0);
}
Example #16
void wcnss_prealloc_check_memory_leak(void)
{
	int i, j = 0;

	for (i = 0; i < ARRAY_SIZE(wcnss_allocs); i++) {
		if (!wcnss_allocs[i].occupied)
			continue;

		if (j == 0) {
			pr_err("wcnss_prealloc: Memory leak detected\n");
			j++;
		}

		pr_err("Size: %u, addr: %pK, backtrace:\n",
				wcnss_allocs[i].size, wcnss_allocs[i].ptr);
		print_stack_trace(&wcnss_allocs[i].trace, 1);
	}

}
Example #17
static void backtrace_test_saved(void)
{
	struct stack_trace trace;
	unsigned long entries[8];

#ifdef CONFIG_DEBUG_PRINTK
	printk("Testing a saved backtrace.\n");
#else
	;
#endif
#ifdef CONFIG_DEBUG_PRINTK
	printk("The following trace is a kernel self test and not a bug!\n");
#else
	;
#endif

	trace.nr_entries = 0;
	trace.max_entries = ARRAY_SIZE(entries);
	trace.entries = entries;
	trace.skip = 0;

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);
}
Example #18
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = PAGE_OWNER_STACK_DEPTH,
		.skip = 0
	};

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg)\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt  = gfpflags_to_migratetype(page_owner->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			page->flags, &page->flags);

	if (ret >= count)
		goto err;

	depot_fetch_stack(handle, &trace);
	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += snprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
		if (ret >= count)
			goto err;
	}

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

void __dump_page_owner(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = PAGE_OWNER_STACK_DEPTH,
		.skip = 0
	};
	depot_stack_handle_t handle;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is not page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfpflags_to_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

	handle = READ_ONCE(page_owner->handle);
	if (!handle) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

	depot_fetch_stack(handle, &trace);
	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
	print_stack_trace(&trace, 0);

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
}

static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Access to page_ext->handle isn't synchronous so we should
		 * be careful to access it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page,
				page_owner, handle);
	}

	return 0;
}

static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page;
			struct page_ext *page_ext;

			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = page_order_unsafe(page);

				if (order > 0 && order < MAX_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found early allocated page */
			__set_page_owner_handle(page_ext, early_handle, 0, 0);
			count++;
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}
Example #19
void VMError::report(outputStream* st, bool _verbose) {

# define BEGIN if (_current_step == 0) { _current_step = __LINE__;
# define STEP(s) } if (_current_step < __LINE__) { _current_step = __LINE__; _current_step_info = s;
# define END }

  // don't allocate large buffer on stack
  static char buf[O_BUFLEN];

  BEGIN

  STEP("printing fatal error message")

    st->print_cr("#");
    if (should_report_bug(_id)) {
      st->print_cr("# A fatal error has been detected by the Java Runtime Environment:");
    } else {
      st->print_cr("# There is insufficient memory for the Java "
                   "Runtime Environment to continue.");
    }

#ifndef PRODUCT
  // Error handler self tests

  // test secondary error handling. Test it twice, to test that resetting
  // error handler after a secondary crash works.
  STEP("test secondary crash 1")
    if (_verbose && TestCrashInErrorHandler != 0) {
      st->print_cr("Will crash now (TestCrashInErrorHandler=" UINTX_FORMAT ")...",
        TestCrashInErrorHandler);
      controlled_crash(TestCrashInErrorHandler);
    }

  STEP("test secondary crash 2")
    if (_verbose && TestCrashInErrorHandler != 0) {
      st->print_cr("Will crash now (TestCrashInErrorHandler=" UINTX_FORMAT ")...",
        TestCrashInErrorHandler);
      controlled_crash(TestCrashInErrorHandler);
    }

  STEP("test safefetch in error handler")
    // test whether it is safe to use SafeFetch32 in Crash Handler. Test twice
    // to test that resetting the signal handler works correctly.
    if (_verbose && TestSafeFetchInErrorHandler) {
      st->print_cr("Will test SafeFetch...");
      if (CanUseSafeFetch32()) {
        int* const invalid_pointer = (int*) get_segfault_address();
        const int x = 0x76543210;
        int i1 = SafeFetch32(invalid_pointer, x);
        int i2 = SafeFetch32(invalid_pointer, x);
        if (i1 == x && i2 == x) {
          st->print_cr("SafeFetch OK."); // Correctly deflected and returned default pattern
        } else {
          st->print_cr("??");
        }
      } else {
        st->print_cr("not possible; skipped.");
      }
    }
#endif // PRODUCT

  STEP("printing type of error")

     switch(_id) {
       case OOM_MALLOC_ERROR:
       case OOM_MMAP_ERROR:
         if (_size) {
           st->print("# Native memory allocation ");
           st->print((_id == (int)OOM_MALLOC_ERROR) ? "(malloc) failed to allocate " :
                                                 "(mmap) failed to map ");
           jio_snprintf(buf, sizeof(buf), SIZE_FORMAT, _size);
           st->print("%s", buf);
           st->print(" bytes");
           if (strlen(_detail_msg) > 0) {
             st->print(" for ");
             st->print("%s", _detail_msg);
           }
           st->cr();
         } else {
           if (strlen(_detail_msg) > 0) {
             st->print("# ");
             st->print_cr("%s", _detail_msg);
           }
         }
         // In error file give some solutions
         if (_verbose) {
           print_oom_reasons(st);
         } else {
           return;  // that's enough for the screen
         }
         break;
       case INTERNAL_ERROR:
       default:
         break;
     }

  STEP("printing exception/signal name")

     st->print_cr("#");
     st->print("#  ");
     // Is it an OS exception/signal?
     if (os::exception_name(_id, buf, sizeof(buf))) {
       st->print("%s", buf);
       st->print(" (0x%x)", _id);                // signal number
       st->print(" at pc=" PTR_FORMAT, p2i(_pc));
     } else {
       if (should_report_bug(_id)) {
         st->print("Internal Error");
       } else {
         st->print("Out of Memory Error");
       }
       if (_filename != NULL && _lineno > 0) {
#ifdef PRODUCT
         // In product mode chop off pathname?
         char separator = os::file_separator()[0];
         const char *p = strrchr(_filename, separator);
         const char *file = p ? p+1 : _filename;
#else
         const char *file = _filename;
#endif
         st->print(" (%s:%d)", file, _lineno);
       } else {
         st->print(" (0x%x)", _id);
       }
     }

  STEP("printing current thread and pid")

     // process id, thread id
     st->print(", pid=%d", os::current_process_id());
     st->print(", tid=" UINTX_FORMAT, os::current_thread_id());
     st->cr();

  STEP("printing error message")

     if (should_report_bug(_id)) {  // already printed the message.
       // error message
       if (strlen(_detail_msg) > 0) {
         st->print_cr("#  %s: %s", _message ? _message : "Error", _detail_msg);
       } else if (_message) {
         st->print_cr("#  Error: %s", _message);
       }
     }

  STEP("printing Java version string")

     report_vm_version(st, buf, sizeof(buf));

  STEP("printing problematic frame")

     // Print current frame if we have a context (i.e. it's a crash)
     if (_context) {
       st->print_cr("# Problematic frame:");
       st->print("# ");
       frame fr = os::fetch_frame_from_context(_context);
       fr.print_on_error(st, buf, sizeof(buf));
       st->cr();
       st->print_cr("#");
     }

  STEP("printing core file information")
    st->print("# ");
    if (CreateCoredumpOnCrash) {
      if (coredump_status) {
        st->print("Core dump will be written. Default location: %s", coredump_message);
      } else {
        st->print("No core dump will be written. %s", coredump_message);
      }
    } else {
      st->print("CreateCoredumpOnCrash turned off, no core file dumped");
    }
    st->cr();
    st->print_cr("#");

  STEP("printing bug submit message")

     if (should_report_bug(_id) && _verbose) {
       print_bug_submit_message(st, _thread);
     }

  STEP("printing summary")

     if (_verbose) {
       st->cr();
       st->print_cr("---------------  S U M M A R Y ------------");
       st->cr();
     }

  STEP("printing VM option summary")

     if (_verbose) {
       // VM options
       Arguments::print_summary_on(st);
       st->cr();
     }

  STEP("printing summary machine and OS info")

     if (_verbose) {
       os::print_summary_info(st, buf, sizeof(buf));
     }


  STEP("printing date and time")

     if (_verbose) {
       os::print_date_and_time(st, buf, sizeof(buf));
     }

  STEP("printing thread")

     if (_verbose) {
       st->cr();
       st->print_cr("---------------  T H R E A D  ---------------");
       st->cr();
     }

  STEP("printing current thread")

     // current thread
     if (_verbose) {
       if (_thread) {
         st->print("Current thread (" PTR_FORMAT "):  ", p2i(_thread));
         _thread->print_on_error(st, buf, sizeof(buf));
         st->cr();
       } else {
         st->print_cr("Current thread is native thread");
       }
       st->cr();
     }

  STEP("printing current compile task")

     if (_verbose && _thread && _thread->is_Compiler_thread()) {
        CompilerThread* t = (CompilerThread*)_thread;
        if (t->task()) {
           st->cr();
           st->print_cr("Current CompileTask:");
           t->task()->print_line_on_error(st, buf, sizeof(buf));
           st->cr();
        }
     }


  STEP("printing stack bounds")

     if (_verbose) {
       st->print("Stack: ");

       address stack_top;
       size_t stack_size;

       if (_thread) {
          stack_top = _thread->stack_base();
          stack_size = _thread->stack_size();
       } else {
          stack_top = os::current_stack_base();
          stack_size = os::current_stack_size();
       }

       address stack_bottom = stack_top - stack_size;
       st->print("[" PTR_FORMAT "," PTR_FORMAT "]", p2i(stack_bottom), p2i(stack_top));

       frame fr = _context ? os::fetch_frame_from_context(_context)
                           : os::current_frame();

       if (fr.sp()) {
         st->print(",  sp=" PTR_FORMAT, p2i(fr.sp()));
         size_t free_stack_size = pointer_delta(fr.sp(), stack_bottom, 1024);
         st->print(",  free space=" SIZE_FORMAT "k", free_stack_size);
       }

       st->cr();
     }

  STEP("printing native stack")

   if (_verbose) {
     if (os::platform_print_native_stack(st, _context, buf, sizeof(buf))) {
       // We have printed the native stack in platform-specific code
       // Windows/x64 needs special handling.
     } else {
       frame fr = _context ? os::fetch_frame_from_context(_context)
                           : os::current_frame();

       print_native_stack(st, fr, _thread, buf, sizeof(buf));
     }
   }

  STEP("printing Java stack")

     if (_verbose && _thread && _thread->is_Java_thread()) {
       print_stack_trace(st, (JavaThread*)_thread, buf, sizeof(buf));
     }

  STEP("printing target Java thread stack")

     // printing Java thread stack trace if it is involved in GC crash
     if (_verbose && _thread && (_thread->is_Named_thread())) {
       JavaThread*  jt = ((NamedThread *)_thread)->processed_thread();
       if (jt != NULL) {
         st->print_cr("JavaThread " PTR_FORMAT " (nid = %d) was being processed", p2i(jt), jt->osthread()->thread_id());
         print_stack_trace(st, jt, buf, sizeof(buf), true);
       }
     }

  STEP("printing siginfo")

     // signal no, signal code, address that caused the fault
     if (_verbose && _siginfo) {
       st->cr();
       os::print_siginfo(st, _siginfo);
       st->cr();
     }

  STEP("CDS archive access warning")

     // Print an explicit hint if we crashed on access to the CDS archive.
     if (_verbose && _siginfo) {
       check_failing_cds_access(st, _siginfo);
       st->cr();
     }

  STEP("printing register info")

     // decode register contents if possible
     if (_verbose && _context && Universe::is_fully_initialized()) {
       os::print_register_info(st, _context);
       st->cr();
     }

  STEP("printing registers, top of stack, instructions near pc")

     // registers, top of stack, instructions near pc
     if (_verbose && _context) {
       os::print_context(st, _context);
       st->cr();
     }

  STEP("printing code blob if possible")

     if (_verbose && _context) {
       CodeBlob* cb = CodeCache::find_blob(_pc);
       if (cb != NULL) {
         if (Interpreter::contains(_pc)) {
           // The interpreter CodeBlob is very large so try to print the codelet instead.
           InterpreterCodelet* codelet = Interpreter::codelet_containing(_pc);
           if (codelet != NULL) {
             codelet->print_on(st);
             Disassembler::decode(codelet->code_begin(), codelet->code_end(), st);
           }
         } else {
           StubCodeDesc* desc = StubCodeDesc::desc_for(_pc);
           if (desc != NULL) {
             desc->print_on(st);
             Disassembler::decode(desc->begin(), desc->end(), st);
           } else {
             Disassembler::decode(cb, st);
             st->cr();
           }
         }
       }
     }

  STEP("printing VM operation")

     if (_verbose && _thread && _thread->is_VM_thread()) {
        VMThread* t = (VMThread*)_thread;
        VM_Operation* op = t->vm_operation();
        if (op) {
          op->print_on_error(st);
          st->cr();
          st->cr();
        }
     }

  STEP("printing process")

     if (_verbose) {
       st->cr();
       st->print_cr("---------------  P R O C E S S  ---------------");
       st->cr();
     }

  STEP("printing all threads")

     // all threads
     if (_verbose && _thread) {
       Threads::print_on_error(st, _thread, buf, sizeof(buf));
       st->cr();
     }

  STEP("printing VM state")

     if (_verbose) {
       // Safepoint state
       st->print("VM state:");

       if (SafepointSynchronize::is_synchronizing()) st->print("synchronizing");
       else if (SafepointSynchronize::is_at_safepoint()) st->print("at safepoint");
       else st->print("not at safepoint");

       // Also see if error occurred during initialization or shutdown
       if (!Universe::is_fully_initialized()) {
         st->print(" (not fully initialized)");
       } else if (VM_Exit::vm_exited()) {
         st->print(" (shutting down)");
       } else {
         st->print(" (normal execution)");
       }
       st->cr();
       st->cr();
     }

  STEP("printing owned locks on error")

     // mutexes/monitors that currently have an owner
     if (_verbose) {
       print_owned_locks_on_error(st);
       st->cr();
     }

  STEP("printing number of OutOfMemoryError and StackOverflow exceptions")

     if (_verbose && Exceptions::has_exception_counts()) {
       st->print_cr("OutOfMemory and StackOverflow Exception counts:");
       Exceptions::print_exception_counts_on_error(st);
       st->cr();
     }

  STEP("printing compressed oops mode")

     if (_verbose && UseCompressedOops) {
       Universe::print_compressed_oops_mode(st);
       if (UseCompressedClassPointers) {
         Metaspace::print_compressed_class_space(st);
       }
       st->cr();
     }

  STEP("printing heap information")

     if (_verbose && Universe::is_fully_initialized()) {
       Universe::heap()->print_on_error(st);
       st->cr();
       st->print_cr("Polling page: " INTPTR_FORMAT, p2i(os::get_polling_page()));
       st->cr();
     }

  STEP("printing code cache information")

     if (_verbose && Universe::is_fully_initialized()) {
       // print code cache information before vm abort
       CodeCache::print_summary(st);
       st->cr();
     }

  STEP("printing ring buffers")

     if (_verbose) {
       Events::print_all(st);
       st->cr();
     }

  STEP("printing dynamic libraries")

     if (_verbose) {
       // dynamic libraries, or memory map
       os::print_dll_info(st);
       st->cr();
     }

  STEP("printing VM options")

     if (_verbose) {
       // VM options
       Arguments::print_on(st);
       st->cr();
     }

  STEP("printing warning if internal testing API used")

     if (WhiteBox::used()) {
       st->print_cr("Unsupported internal testing APIs have been used.");
       st->cr();
     }

  STEP("printing log configuration")
    if (_verbose){
      st->print_cr("Logging:");
      LogConfiguration::describe_current_configuration(st);
      st->cr();
    }

  STEP("printing all environment variables")

     if (_verbose) {
       os::print_environment_variables(st, env_list);
       st->cr();
     }

  STEP("printing signal handlers")

     if (_verbose) {
       os::print_signal_handlers(st, buf, sizeof(buf));
       st->cr();
     }

  STEP("Native Memory Tracking")
     if (_verbose) {
       MemTracker::error_report(st);
     }

  STEP("printing system")

     if (_verbose) {
       st->cr();
       st->print_cr("---------------  S Y S T E M  ---------------");
       st->cr();
     }

  STEP("printing OS information")

     if (_verbose) {
       os::print_os_info(st);
       st->cr();
     }

  STEP("printing CPU info")
     if (_verbose) {
       os::print_cpu_info(st, buf, sizeof(buf));
       st->cr();
     }

  STEP("printing memory info")

     if (_verbose) {
       os::print_memory_info(st);
       st->cr();
     }

  STEP("printing internal vm info")

     if (_verbose) {
       st->print_cr("vm_info: %s", Abstract_VM_Version::internal_vm_info_string());
       st->cr();
     }

  // print a defined marker to show that error handling finished correctly.
  STEP("printing end marker")

     if (_verbose) {
       st->print_cr("END.");
     }

  END

# undef BEGIN
# undef STEP
# undef END
}
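
The BEGIN/STEP/END macros make this report resumable: _current_step remembers the source line of the last step that started, so if a step itself crashes and the error handler is re-entered, everything already attempted is skipped and reporting continues with the next step. A minimal standalone sketch of the same idea in plain C (names here are illustrative, not the HotSpot ones):

#include <stdio.h>

static int current_step;        /* survives re-entry after a secondary crash */

#define BEGIN  if (current_step == 0) { current_step = __LINE__;
#define STEP   } if (current_step < __LINE__) { current_step = __LINE__;
#define END    }

/* Each STEP body runs only if no earlier invocation already got past it, so a
 * crash inside one step costs that step alone, not the rest of the report. */
void report_steps(void)
{
  BEGIN

  STEP
    printf("step A\n");

  STEP
    printf("step B\n");   /* if this faults, a re-entered call resumes at C */

  STEP
    printf("step C\n");

  END
}

#undef BEGIN
#undef STEP
#undef END
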
Example #20
void inner(int k)
{
  print_stack_trace();
}
Example #21
void VMError::report(outputStream* st) {
# define BEGIN if (_current_step == 0) { _current_step = 1;
# define STEP(n, s) } if (_current_step < n) { _current_step = n; _current_step_info = s;
# define END }

  // don't allocate large buffer on stack
  static char buf[O_BUFLEN];

  BEGIN

  STEP(10, "(printing fatal error message)")

    st->print_cr("#");
    if (should_report_bug(_id)) {
      st->print_cr("# A fatal error has been detected by the Java Runtime Environment:");
    } else {
      st->print_cr("# There is insufficient memory for the Java "
                   "Runtime Environment to continue.");
    }

  STEP(15, "(printing type of error)")

     switch(_id) {
       case oom_error:
         if (_size) {
           st->print("# Native memory allocation (malloc) failed to allocate ");
           jio_snprintf(buf, sizeof(buf), SIZE_FORMAT, _size);
           st->print(buf);
           st->print(" bytes");
           if (_message != NULL) {
             st->print(" for ");
             st->print(_message);
           }
           st->cr();
         } else {
           if (_message != NULL) {
             st->print("# ");
             st->print_cr(_message);
           }
         }
         // In error file give some solutions
         if (_verbose) {
           st->print_cr("# Possible reasons:");
           st->print_cr("#   The system is out of physical RAM or swap space");
           st->print_cr("#   In 32 bit mode, the process size limit was hit");
           st->print_cr("# Possible solutions:");
           st->print_cr("#   Reduce memory load on the system");
           st->print_cr("#   Increase physical memory or swap space");
           st->print_cr("#   Check if swap backing store is full");
           st->print_cr("#   Use 64 bit Java on a 64 bit OS");
           st->print_cr("#   Decrease Java heap size (-Xmx/-Xms)");
           st->print_cr("#   Decrease number of Java threads");
           st->print_cr("#   Decrease Java thread stack sizes (-Xss)");
           st->print_cr("#   Set larger code cache with -XX:ReservedCodeCacheSize=");
           st->print_cr("# This output file may be truncated or incomplete.");
         } else {
           return;  // that's enough for the screen
         }
         break;
       case internal_error:
       default:
         break;
     }

  STEP(20, "(printing exception/signal name)")

     st->print_cr("#");
     st->print("#  ");
     // Is it an OS exception/signal?
     if (os::exception_name(_id, buf, sizeof(buf))) {
       st->print("%s", buf);
       st->print(" (0x%x)", _id);                // signal number
       st->print(" at pc=" PTR_FORMAT, _pc);
     } else {
       if (should_report_bug(_id)) {
         st->print("Internal Error");
       } else {
         st->print("Out of Memory Error");
       }
       if (_filename != NULL && _lineno > 0) {
#ifdef PRODUCT
         // In product mode chop off pathname?
         char separator = os::file_separator()[0];
         const char *p = strrchr(_filename, separator);
         const char *file = p ? p+1 : _filename;
#else
         const char *file = _filename;
#endif
         size_t len = strlen(file);
         size_t buflen = sizeof(buf);

         strncpy(buf, file, buflen);
         if (len + 10 < buflen) {
           sprintf(buf + len, ":%d", _lineno);
         }
         st->print(" (%s)", buf);
       } else {
         st->print(" (0x%x)", _id);
       }
     }

  STEP(30, "(printing current thread and pid)")

     // process id, thread id
     st->print(", pid=%d", os::current_process_id());
     st->print(", tid=" UINTX_FORMAT, os::current_thread_id());
     st->cr();

  STEP(40, "(printing error message)")

     if (should_report_bug(_id)) {  // already printed the message.
       // error message
       if (_detail_msg) {
         st->print_cr("#  %s: %s", _message ? _message : "Error", _detail_msg);
       } else if (_message) {
         st->print_cr("#  Error: %s", _message);
       }
    }

  STEP(50, "(printing Java version string)")

     // VM version
     st->print_cr("#");
     JDK_Version::current().to_string(buf, sizeof(buf));
     st->print_cr("# JRE version: %s", buf);
     st->print_cr("# Java VM: %s (%s %s %s %s)",
                   Abstract_VM_Version::vm_name(),
                   Abstract_VM_Version::vm_release(),
                   Abstract_VM_Version::vm_info_string(),
                   Abstract_VM_Version::vm_platform_string(),
                   UseCompressedOops ? "compressed oops" : ""
                 );

  STEP(60, "(printing problematic frame)")

     // Print current frame if we have a context (i.e. it's a crash)
     if (_context) {
       st->print_cr("# Problematic frame:");
       st->print("# ");
       frame fr = os::fetch_frame_from_context(_context);
       fr.print_on_error(st, buf, sizeof(buf));
       st->cr();
       st->print_cr("#");
     }
  STEP(63, "(printing core file information)")
    st->print("# ");
    if (coredump_status) {
      st->print("Core dump written. Default location: %s", coredump_message);
    } else {
      st->print("Failed to write core dump. %s", coredump_message);
    }
    st->print_cr("");
    st->print_cr("#");

  STEP(65, "(printing bug submit message)")

     if (should_report_bug(_id) && _verbose) {
       print_bug_submit_message(st, _thread);
     }

  STEP(70, "(printing thread)" )

     if (_verbose) {
       st->cr();
       st->print_cr("---------------  T H R E A D  ---------------");
       st->cr();
     }

  STEP(80, "(printing current thread)" )

     // current thread
     if (_verbose) {
       if (_thread) {
         st->print("Current thread (" PTR_FORMAT "):  ", _thread);
         _thread->print_on_error(st, buf, sizeof(buf));
         st->cr();
       } else {
         st->print_cr("Current thread is native thread");
       }
       st->cr();
     }

  STEP(90, "(printing siginfo)" )

     // signal no, signal code, address that caused the fault
     if (_verbose && _siginfo) {
       os::print_siginfo(st, _siginfo);
       st->cr();
     }

  STEP(100, "(printing registers, top of stack, instructions near pc)")

     // registers, top of stack, instructions near pc
     if (_verbose && _context) {
       os::print_context(st, _context);
       st->cr();
     }

  STEP(105, "(printing register info)")

     // decode register contents if possible
     if (_verbose && _context && Universe::is_fully_initialized()) {
       os::print_register_info(st, _context);
       st->cr();
     }

  STEP(110, "(printing stack bounds)" )

     if (_verbose) {
       st->print("Stack: ");

       address stack_top;
       size_t stack_size;

       if (_thread) {
          stack_top = _thread->stack_base();
          stack_size = _thread->stack_size();
       } else {
          stack_top = os::current_stack_base();
          stack_size = os::current_stack_size();
       }

       address stack_bottom = stack_top - stack_size;
       st->print("[" PTR_FORMAT "," PTR_FORMAT "]", stack_bottom, stack_top);

       frame fr = _context ? os::fetch_frame_from_context(_context)
                           : os::current_frame();

       if (fr.sp()) {
         st->print(",  sp=" PTR_FORMAT, fr.sp());
         size_t free_stack_size = pointer_delta(fr.sp(), stack_bottom, 1024);
         st->print(",  free space=" SIZE_FORMAT "k", free_stack_size);
       }

       st->cr();
     }

  STEP(120, "(printing native stack)" )

     if (_verbose) {
       frame fr = _context ? os::fetch_frame_from_context(_context)
                           : os::current_frame();

       // see if it's a valid frame
       if (fr.pc()) {
          st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");


          int count = 0;
          while (count++ < StackPrintLimit) {
             fr.print_on_error(st, buf, sizeof(buf));
             st->cr();
             if (os::is_first_C_frame(&fr)) break;
             fr = os::get_sender_for_C_frame(&fr);
          }

          if (count > StackPrintLimit) {
             st->print_cr("...<more frames>...");
          }

          st->cr();
       }
     }

  STEP(130, "(printing Java stack)" )

     if (_verbose && _thread && _thread->is_Java_thread()) {
       print_stack_trace(st, (JavaThread*)_thread, buf, sizeof(buf));
     }

  STEP(135, "(printing target Java thread stack)" )

     // printing Java thread stack trace if it is involved in GC crash
     if (_verbose && _thread && (_thread->is_Named_thread())) {
       JavaThread*  jt = ((NamedThread *)_thread)->processed_thread();
       if (jt != NULL) {
         st->print_cr("JavaThread " PTR_FORMAT " (nid = " UINTX_FORMAT ") was being processed", jt, jt->osthread()->thread_id());
         print_stack_trace(st, jt, buf, sizeof(buf), true);
       }
     }

  STEP(140, "(printing VM operation)" )

     if (_verbose && _thread && _thread->is_VM_thread()) {
        VMThread* t = (VMThread*)_thread;
        VM_Operation* op = t->vm_operation();
        if (op) {
          op->print_on_error(st);
          st->cr();
          st->cr();
        }
     }

  STEP(150, "(printing current compile task)" )

     if (_verbose && _thread && _thread->is_Compiler_thread()) {
        CompilerThread* t = (CompilerThread*)_thread;
        if (t->task()) {
           st->cr();
           st->print_cr("Current CompileTask:");
           t->task()->print_line_on_error(st, buf, sizeof(buf));
           st->cr();
        }
     }

  STEP(160, "(printing process)" )

     if (_verbose) {
       st->cr();
       st->print_cr("---------------  P R O C E S S  ---------------");
       st->cr();
     }

  STEP(170, "(printing all threads)" )

     // all threads
     if (_verbose && _thread) {
       Threads::print_on_error(st, _thread, buf, sizeof(buf));
       st->cr();
     }

  STEP(175, "(printing VM state)" )

     if (_verbose) {
       // Safepoint state
       st->print("VM state:");

       if (SafepointSynchronize::is_synchronizing()) st->print("synchronizing");
       else if (SafepointSynchronize::is_at_safepoint()) st->print("at safepoint");
       else st->print("not at safepoint");

       // Also see if error occurred during initialization or shutdown
       if (!Universe::is_fully_initialized()) {
         st->print(" (not fully initialized)");
       } else if (VM_Exit::vm_exited()) {
         st->print(" (shutting down)");
       } else {
         st->print(" (normal execution)");
       }
       st->cr();
       st->cr();
     }

  STEP(180, "(printing owned locks on error)" )

     // mutexes/monitors that currently have an owner
     if (_verbose) {
       print_owned_locks_on_error(st);
       st->cr();
     }

  STEP(190, "(printing heap information)" )

     if (_verbose && Universe::is_fully_initialized()) {
       // Print heap information before vm abort. As we'd like as much
       // information as possible in the report we ask for the
       // extended (i.e., more detailed) version.
       Universe::print_on(st, true /* extended */);
       st->cr();

       Universe::heap()->barrier_set()->print_on(st);
       st->cr();

       st->print_cr("Polling page: " INTPTR_FORMAT, os::get_polling_page());
       st->cr();
     }

  STEP(195, "(printing code cache information)" )

     if (_verbose && Universe::is_fully_initialized()) {
       // print code cache information before vm abort
       CodeCache::print_bounds(st);
       st->cr();
     }

  STEP(200, "(printing ring buffers)" )

     if (_verbose) {
       Events::print_all(st);
       st->cr();
     }

  STEP(205, "(printing dynamic libraries)" )

     if (_verbose) {
       // dynamic libraries, or memory map
       os::print_dll_info(st);
       st->cr();
     }

  STEP(210, "(printing VM options)" )

     if (_verbose) {
       // VM options
       Arguments::print_on(st);
       st->cr();
     }

  STEP(220, "(printing environment variables)" )

     if (_verbose) {
       os::print_environment_variables(st, env_list, buf, sizeof(buf));
       st->cr();
     }

  STEP(225, "(printing signal handlers)" )

     if (_verbose) {
       os::print_signal_handlers(st, buf, sizeof(buf));
       st->cr();
     }

  STEP(230, "" )

     if (_verbose) {
       st->cr();
       st->print_cr("---------------  S Y S T E M  ---------------");
       st->cr();
     }

  STEP(240, "(printing OS information)" )

     if (_verbose) {
       os::print_os_info(st);
       st->cr();
     }

  STEP(250, "(printing CPU info)" )
     if (_verbose) {
       os::print_cpu_info(st);
       st->cr();
     }

  STEP(260, "(printing memory info)" )

     if (_verbose) {
       os::print_memory_info(st);
       st->cr();
     }

  STEP(270, "(printing internal vm info)" )

     if (_verbose) {
       st->print_cr("vm_info: %s", Abstract_VM_Version::internal_vm_info_string());
       st->cr();
     }

  STEP(280, "(printing date and time)" )

     if (_verbose) {
       os::print_date_and_time(st);
       st->cr();
     }

  END

# undef BEGIN
# undef STEP
# undef END
}
Example #22
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	struct stack_trace trace = {
		.nr_entries = 0,
		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
		.entries = &page_ext->trace_entries[0],
		.skip = 3,
	};

	if (unlikely(!page_ext))
		return;

	save_stack_trace(&trace);

	page_ext->order = order;
	page_ext->gfp_mask = gfp_mask;
	page_ext->nr_entries = trace.nr_entries;
	page_ext->last_migrate_reason = -1;

	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;

	page_ext->last_migrate_reason = reason;
}

gfp_t __get_page_owner_gfp(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		/*
		 * The caller just returns 0 if no valid gfp
		 * So return 0 here too.
		 */
		return 0;

	return page_ext->gfp_mask;
}

void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
	struct page_ext *old_ext = lookup_page_ext(oldpage);
	struct page_ext *new_ext = lookup_page_ext(newpage);
	int i;

	if (unlikely(!old_ext || !new_ext))
		return;

	new_ext->order = old_ext->order;
	new_ext->gfp_mask = old_ext->gfp_mask;
	new_ext->nr_entries = old_ext->nr_entries;

	for (i = 0; i < ARRAY_SIZE(new_ext->trace_entries); i++)
		new_ext->trace_entries[i] = old_ext->trace_entries[i];

	/*
	 * We don't clear the bit on the oldpage as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
	 * Also, migrate_misplaced_transhuge_page() can still fail the
	 * migration and then we want the oldpage to retain the info. But
	 * in that case we also don't need to explicitly clear the info from
	 * the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}

static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_ext *page_ext)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;
	struct stack_trace trace = {
		.nr_entries = page_ext->nr_entries,
		.entries = &page_ext->trace_entries[0],
	};

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg)\n",
			page_ext->order, page_ext->gfp_mask,
			&page_ext->gfp_mask);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt  = gfpflags_to_migratetype(page_ext->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			page->flags, &page->flags);

	if (ret >= count)
		goto err;

	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
	if (ret >= count)
		goto err;

	if (page_ext->last_migrate_reason != -1) {
		ret += snprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_ext->last_migrate_reason]);
		if (ret >= count)
			goto err;
	}

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

void __dump_page_owner(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct stack_trace trace = {
		.nr_entries = page_ext->nr_entries,
		.entries = &page_ext->trace_entries[0],
	};
	gfp_t gfp_mask = page_ext->gfp_mask;
	int mt = gfpflags_to_migratetype(gfp_mask);

	if (unlikely(!page_ext)) {
		pr_alert("There is not page extension available.\n");
		return;
	}

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
		 page_ext->order, migratetype_names[mt], gfp_mask, &gfp_mask);
	print_stack_trace(&trace, 0);

	if (page_ext->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_ext->last_migrate_reason]);
}

static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page, page_ext);
	}

	return 0;
}

static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count = 0;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			/*
			 * We are safe to check buddy flag and order, because
			 * this is init stage and only single thread runs.
			 */
			if (PageBuddy(page)) {
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found early allocated page */
			set_page_owner(page, 0, 0);
			count++;
		}
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		init_pages_in_zone(pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	drain_all_pages(NULL);
	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}