Example #1
0
/* Check expired timers */
static uint32_t ktimers_check(void)
{
    struct ktimer *t; 
    struct ktimer t_previous;
    if (!ktimer_list)
        return -1;
    if (!ktimer_list->n)
        return -1;
    t = heap_first(ktimer_list);
    while ((t) && (t->expire_time < jiffies)) {
        if (t->handler) {
            t->handler(jiffies, t->arg);
        }
        /* Pop the expired timer that was just serviced, then look at the next one. */
        heap_peek(ktimer_list, &t_previous);
        t = heap_first(ktimer_list);
    }
    if (!t)
        return -1;
    return (t->expire_time - jiffies);
}
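
Example #1 treats ktimer_list as a priority queue ordered by expire_time: heap_first() returns a pointer to the earliest-expiring entry without removing it, and heap_peek() pops that entry into a caller-supplied buffer. The stand-in below is only a sketch of that assumed interface (a linear scan instead of a real binary heap, and made-up field widths); it is not the snippet's actual heap implementation.

#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-in for the heap interface assumed by Example #1:
 * heap_first() peeks at the earliest timer, heap_peek() pops it.
 * A real implementation would use a binary min-heap instead of the
 * linear scan used here for brevity. */
struct ktimer {
    uint64_t expire_time;
    void (*handler)(uint64_t now, void *arg);
    void *arg;
};

struct ktimer_heap {
    struct ktimer entries[32];
    int n;
};

static int heap_min_index(struct ktimer_heap *h)
{
    int i, min = 0;
    for (i = 1; i < h->n; i++)
        if (h->entries[i].expire_time < h->entries[min].expire_time)
            min = i;
    return min;
}

/* Return the earliest-expiring timer without removing it (NULL when empty). */
static struct ktimer *heap_first(struct ktimer_heap *h)
{
    return h->n ? &h->entries[heap_min_index(h)] : NULL;
}

/* Pop the earliest-expiring timer into *out. */
static void heap_peek(struct ktimer_heap *h, struct ktimer *out)
{
    int min;
    if (!h->n)
        return;
    min = heap_min_index(h);
    *out = h->entries[min];
    h->entries[min] = h->entries[--h->n];
}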
Example #2
0
void timer_check(void)
{
	evquick_timer_instance t, *first;
	unsigned long long now = gettimeofdayms();
	first = heap_first(ctx->timers);
	while(first && (first->expire <= now)) {
		/* Pop the expired instance from the heap. */
		heap_peek(ctx->timers, &t);
		if (!t.ev_timer) {
			first = heap_first(ctx->timers);
			continue;
		}
		if (t.ev_timer->flags & EVQUICK_EV_DISABLED) {
			/* Timer was disabled in the meanwhile.
			 * Take no action, and destroy it.
			 */
			free(t.ev_timer);
		} else if (t.ev_timer->flags & EVQUICK_EV_RETRIGGER) {
			timer_trigger(t.ev_timer, now, now + t.ev_timer->interval);
			t.ev_timer->callback(t.ev_timer->arg);
			/* Don't free the timer, reuse for next instance
			 * that has just been scheduled.
			 */
		} else {
			/* One shot, invoke callback,
			 * then destroy the timer. */
			t.ev_timer->callback(t.ev_timer->arg);
			free(t.ev_timer);
		}
		first = heap_first(ctx->timers);
	}
	if(first) {
		unsigned long long interval = first->expire - now;
		if (interval >= 1000)
			alarm((unsigned)(interval / 1000));
		else
			ualarm((useconds_t)(interval * 1000), 0);
	}
}
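
timer_check() relies on a millisecond clock, gettimeofdayms(), whose definition is not part of the snippet. A plausible sketch (an assumption, not the library's own code) derives it from gettimeofday():

#include <sys/time.h>

/* Hypothetical helper: millisecond timestamp built from gettimeofday().
 * The snippet's real gettimeofdayms() may differ. */
static unsigned long long gettimeofdayms(void)
{
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return (unsigned long long)tv.tv_sec * 1000ULL
		+ (unsigned long long)tv.tv_usec / 1000ULL;
}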
Example #3
0
void*
receive_data_messages_thread()
{
	while(true)
	{
		pthread_mutex_lock(&data_message_queue_mutex);

		while(heap_is_empty(data_message_queue))
		{
			debug(5, "wait for data message timeout");
			pthread_cond_wait(&data_message_queue_cond, &data_message_queue_mutex);
		}

		bool wait = true;
		struct timespec timeout;
		struct data_message* message = *(struct data_message**) heap_first(data_message_queue);

		//while(wait && message->state != COMPLETE && message->receive_time + DATA_MESSAGE_TIMEOUT // TODO

		/* Release the queue lock before the next iteration so producers
		 * can enqueue new data messages. */
		pthread_mutex_unlock(&data_message_queue_mutex);
	}
}
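
The commented-out loop at the end of Example #3 is a timeout-bounded wait on the data message queue. One conventional way to write such a wait, shown only as a sketch with illustrative names (the snippet's own mutex, condition variable, and DATA_MESSAGE_TIMEOUT handling may differ), is pthread_cond_timedwait() with an absolute deadline:

#include <pthread.h>
#include <time.h>

/* Sketch: wait on cond/mutex for at most timeout_ms milliseconds.
 * Returns 0 when signalled, ETIMEDOUT when the deadline passes; the
 * caller re-checks its predicate in both cases. */
static int wait_with_deadline(pthread_mutex_t *mutex, pthread_cond_t *cond,
			      unsigned timeout_ms)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_ms / 1000;
	deadline.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec += 1;
		deadline.tv_nsec -= 1000000000L;
	}
	return pthread_cond_timedwait(cond, mutex, &deadline);
}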
Example #4
0
static void timer_trigger(evquick_timer *t, unsigned long long now,
	unsigned long long expire)
{
	evquick_timer_instance tev, *first;
	tev.ev_timer = t;
	tev.expire = expire;
	heap_insert(ctx->timers, &tev);
	first = heap_first(ctx->timers);
	if (first) {
		unsigned long long interval;
		if (now >= first->expire) {
			/* Earliest timer already expired: fire almost immediately. */
			ualarm(1000, 0);
			return;
		}
		interval = first->expire - now;
		if (interval >= 1000)
			alarm((unsigned)(interval / 1000));
		else
			ualarm((useconds_t)(interval * 1000), 0);
	}
}
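
Examples #2 and #4 arm alarm()/ualarm() but never show who reacts to the signal. The wiring below is a hedged sketch of one reasonable arrangement (the handler name, the flag, and the deferred call into timer_check() are assumptions, not the library's actual code): the SIGALRM handler only records that the alarm fired, and the event loop runs the heap scan.

#include <signal.h>
#include <string.h>

void timer_check(void);                 /* from Example #2 */

static volatile sig_atomic_t alarm_fired;

/* Keep the handler async-signal-safe: just note that the alarm went off. */
static void on_sigalrm(int signo)
{
	(void)signo;
	alarm_fired = 1;
}

static void install_alarm_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_sigalrm;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;
	sigaction(SIGALRM, &sa, NULL);
}

/* In the event loop:
 *     if (alarm_fired) { alarm_fired = 0; timer_check(); }
 */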
Example #5
0
int RAND_poll(void)
{
	MEMORYSTATUS m;
	HCRYPTPROV hProvider = 0;
	DWORD w;
	int good = 0;

	/* Determine the OS version we are on so we can turn off things 
	 * that do not work properly.
	 */
	OSVERSIONINFO osverinfo;
	osverinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
	GetVersionEx(&osverinfo);

#if defined(OPENSSL_SYS_WINCE)
# if defined(_WIN32_WCE) && _WIN32_WCE>=300
/* Even though MSDN says _WIN32_WCE>=210, it doesn't seem to be available
 * in commonly available implementations prior to 300... */
	{
	BYTE buf[64];
	/* poll the CryptoAPI PRNG */
	/* The CryptoAPI returns sizeof(buf) bytes of randomness */
	if (CryptAcquireContextW(&hProvider, NULL, NULL, PROV_RSA_FULL,
				CRYPT_VERIFYCONTEXT))
		{
		if (CryptGenRandom(hProvider, sizeof(buf), buf))
			RAND_add(buf, sizeof(buf), sizeof(buf));
		CryptReleaseContext(hProvider, 0); 
		}
	}
# endif
#else	/* OPENSSL_SYS_WINCE */
	/*
	 * None of below libraries are present on Windows CE, which is
	 * why we #ifndef the whole section. This also excuses us from
	 * handling the GetProcAddress issue. The trouble is that in
	 * real Win32 API GetProcAddress is available in ANSI flavor
	 * only. In WinCE on the other hand GetProcAddress is a macro
	 * most commonly defined as GetProcAddressW, which accepts
	 * Unicode argument. If we were to call GetProcAddress under
	 * WinCE, I'd recommend to either redefine GetProcAddress as
	 * GetProcAddressA (there seem to be one in common CE spec) or
	 * implement own shim routine, which would accept ANSI argument
	 * and expand it to Unicode.
	 */
	{
	/* load functions dynamically - not available on all systems */
	HMODULE advapi = LoadLibrary(TEXT("ADVAPI32.DLL"));
	HMODULE kernel = LoadLibrary(TEXT("KERNEL32.DLL"));
	HMODULE user = NULL;
	HMODULE netapi = LoadLibrary(TEXT("NETAPI32.DLL"));
	CRYPTACQUIRECONTEXTW acquire = NULL;
	CRYPTGENRANDOM gen = NULL;
	CRYPTRELEASECONTEXT release = NULL;
	NETSTATGET netstatget = NULL;
	NETFREE netfree = NULL;
	BYTE buf[64];

	if (netapi)
		{
		netstatget = (NETSTATGET) GetProcAddress(netapi,"NetStatisticsGet");
		netfree = (NETFREE) GetProcAddress(netapi,"NetApiBufferFree");
		}

	if (netstatget && netfree)
		{
		LPBYTE outbuf;
		/* NetStatisticsGet() is a Unicode only function
 		 * STAT_WORKSTATION_0 contains 45 fields and STAT_SERVER_0
		 * contains 17 fields.  We treat each field as a source of
		 * one byte of entropy.
                 */

		if (netstatget(NULL, L"LanmanWorkstation", 0, 0, &outbuf) == 0)
			{
			RAND_add(outbuf, sizeof(STAT_WORKSTATION_0), 45);
			netfree(outbuf);
			}
		if (netstatget(NULL, L"LanmanServer", 0, 0, &outbuf) == 0)
			{
			RAND_add(outbuf, sizeof(STAT_SERVER_0), 17);
			netfree(outbuf);
			}
		}

	if (netapi)
		FreeLibrary(netapi);

        /* It appears like this can cause an exception deep within ADVAPI32.DLL
         * at random times on Windows 2000.  Reported by Jeffrey Altman.  
         * Only use it on NT.
	 */
	/* Wolfgang Marczy <*****@*****.**> reports that
	 * the RegQueryValueEx call below can hang on NT4.0 (SP6).
	 * So we don't use this at all for now. */
#if 0
        if ( osverinfo.dwPlatformId == VER_PLATFORM_WIN32_NT &&
		osverinfo.dwMajorVersion < 5)
		{
		/* Read Performance Statistics from NT/2000 registry
		 * The size of the performance data can vary from call
		 * to call so we must guess the size of the buffer to use
		 * and increase its size if we get an ERROR_MORE_DATA
		 * return instead of ERROR_SUCCESS.
		 */
		LONG   rc=ERROR_MORE_DATA;
		char * buf=NULL;
		DWORD bufsz=0;
		DWORD length;

		while (rc == ERROR_MORE_DATA)
			{
			buf = realloc(buf,bufsz+8192);
			if (!buf)
				break;
			bufsz += 8192;

			length = bufsz;
			rc = RegQueryValueEx(HKEY_PERFORMANCE_DATA, TEXT("Global"),
				NULL, NULL, buf, &length);
			}
		if (rc == ERROR_SUCCESS)
			{
                        /* For entropy count assume only least significant
			 * byte of each DWORD is random.
			 */
			RAND_add(&length, sizeof(length), 0);
			RAND_add(buf, length, length / 4.0);

			/* Close the Registry Key to allow Windows to cleanup/close
			 * the open handle
			 * Note: The 'HKEY_PERFORMANCE_DATA' key is implicitly opened
			 *       when the RegQueryValueEx above is done.  However, if
			 *       it is not explicitly closed, it can cause disk
			 *       partition manipulation problems.
			 */
			RegCloseKey(HKEY_PERFORMANCE_DATA);
			}
		if (buf)
			free(buf);
		}
#endif

	if (advapi)
		{
		/*
		 * If it's available, then it's available in both ANSI
		 * and UNICODE flavors even in Win9x, documentation says.
		 * We favor Unicode...
		 */
		acquire = (CRYPTACQUIRECONTEXTW) GetProcAddress(advapi,
			"CryptAcquireContextW");
		gen = (CRYPTGENRANDOM) GetProcAddress(advapi,
			"CryptGenRandom");
		release = (CRYPTRELEASECONTEXT) GetProcAddress(advapi,
			"CryptReleaseContext");
		}

	if (acquire && gen && release)
		{
		/* poll the CryptoAPI PRNG */
                /* The CryptoAPI returns sizeof(buf) bytes of randomness */
		if (acquire(&hProvider, NULL, NULL, PROV_RSA_FULL,
			CRYPT_VERIFYCONTEXT))
			{
			if (gen(hProvider, sizeof(buf), buf) != 0)
				{
				RAND_add(buf, sizeof(buf), 0);
				good = 1;
#if 0
				printf("randomness from PROV_RSA_FULL\n");
#endif
				}
			release(hProvider, 0); 
			}
		
		/* poll the Pentium PRG with CryptoAPI */
		if (acquire(&hProvider, 0, INTEL_DEF_PROV, PROV_INTEL_SEC, 0))
			{
			if (gen(hProvider, sizeof(buf), buf) != 0)
				{
				RAND_add(buf, sizeof(buf), sizeof(buf));
				good = 1;
#if 0
				printf("randomness from PROV_INTEL_SEC\n");
#endif
				}
			release(hProvider, 0);
			}
		}

        if (advapi)
		FreeLibrary(advapi);

	if ((osverinfo.dwPlatformId != VER_PLATFORM_WIN32_NT ||
	     !OPENSSL_isservice()) &&
	    (user = LoadLibrary(TEXT("USER32.DLL"))))
		{
		GETCURSORINFO cursor;
		GETFOREGROUNDWINDOW win;
		GETQUEUESTATUS queue;

		win = (GETFOREGROUNDWINDOW) GetProcAddress(user, "GetForegroundWindow");
		cursor = (GETCURSORINFO) GetProcAddress(user, "GetCursorInfo");
		queue = (GETQUEUESTATUS) GetProcAddress(user, "GetQueueStatus");

		if (win)
			{
			/* window handle */
			HWND h = win();
			RAND_add(&h, sizeof(h), 0);
			}
		if (cursor)
			{
			/* unfortunately, it's not safe to call GetCursorInfo()
			 * on NT4 even though it exists in SP3 (or SP6) and
			 * higher.
			 */
			if ( osverinfo.dwPlatformId == VER_PLATFORM_WIN32_NT &&
				osverinfo.dwMajorVersion < 5)
				cursor = 0;
			}
		if (cursor)
			{
			/* cursor position */
                        /* assume 2 bytes of entropy */
			CURSORINFO ci;
			ci.cbSize = sizeof(CURSORINFO);
			if (cursor(&ci))
				RAND_add(&ci, ci.cbSize, 2);
			}

		if (queue)
			{
			/* message queue status */
                        /* assume 1 byte of entropy */
			w = queue(QS_ALLEVENTS);
			RAND_add(&w, sizeof(w), 1);
			}

		FreeLibrary(user);
		}

	/* Toolhelp32 snapshot: enumerate processes, threads, modules and heap
	 * http://msdn.microsoft.com/library/psdk/winbase/toolhelp_5pfd.htm
	 * (Win 9x and 2000 only, not available on NT)
	 *
	 * This seeding method was proposed in Peter Gutmann, Software
	 * Generation of Practically Strong Random Numbers,
	 * http://www.usenix.org/publications/library/proceedings/sec98/gutmann.html
	 * revised version at http://www.cryptoengines.com/~peter/06_random.pdf
	 * (The assignment of entropy estimates below is arbitrary, but based
	 * on Peter's analysis the full poll appears to be safe. Additional
	 * interactive seeding is encouraged.)
	 */

	if (kernel)
		{
		CREATETOOLHELP32SNAPSHOT snap;
		CLOSETOOLHELP32SNAPSHOT close_snap;
		HANDLE handle;

		HEAP32FIRST heap_first;
		HEAP32NEXT heap_next;
		HEAP32LIST heaplist_first, heaplist_next;
		PROCESS32 process_first, process_next;
		THREAD32 thread_first, thread_next;
		MODULE32 module_first, module_next;

		HEAPLIST32 hlist;
		HEAPENTRY32 hentry;
		PROCESSENTRY32 p;
		THREADENTRY32 t;
		MODULEENTRY32 m;
		DWORD starttime = 0;

		snap = (CREATETOOLHELP32SNAPSHOT)
			GetProcAddress(kernel, "CreateToolhelp32Snapshot");
		close_snap = (CLOSETOOLHELP32SNAPSHOT)
			GetProcAddress(kernel, "CloseToolhelp32Snapshot");
		heap_first = (HEAP32FIRST) GetProcAddress(kernel, "Heap32First");
		heap_next = (HEAP32NEXT) GetProcAddress(kernel, "Heap32Next");
		heaplist_first = (HEAP32LIST) GetProcAddress(kernel, "Heap32ListFirst");
		heaplist_next = (HEAP32LIST) GetProcAddress(kernel, "Heap32ListNext");
		process_first = (PROCESS32) GetProcAddress(kernel, "Process32First");
		process_next = (PROCESS32) GetProcAddress(kernel, "Process32Next");
		thread_first = (THREAD32) GetProcAddress(kernel, "Thread32First");
		thread_next = (THREAD32) GetProcAddress(kernel, "Thread32Next");
		module_first = (MODULE32) GetProcAddress(kernel, "Module32First");
		module_next = (MODULE32) GetProcAddress(kernel, "Module32Next");

		if (snap && heap_first && heap_next && heaplist_first &&
			heaplist_next && process_first && process_next &&
			thread_first && thread_next && module_first &&
			module_next && (handle = snap(TH32CS_SNAPALL,0))
			!= INVALID_HANDLE_VALUE)
			{
			/* heap list and heap walking */
                        /* HEAPLIST32 contains 3 fields that will change with
                         * each entry.  Consider each field a source of 1 byte
                         * of entropy.
                         * HEAPENTRY32 contains 5 fields that will change with 
                         * each entry.  Consider each field a source of 1 byte
                         * of entropy.
                         */
			ZeroMemory(&hlist, sizeof(HEAPLIST32));
			hlist.dwSize = sizeof(HEAPLIST32);		
			if (good) starttime = GetTickCount();
#ifdef _MSC_VER
			if (heaplist_first(handle, &hlist))
				{
				/*
				   following discussion on dev ML, exception on WinCE (or other Win
				   platform) is theoretically of unknown origin; prevent infinite
				   loop here when this theoretical case occurs; otherwise cope with
				   the expected (MSDN documented) exception-throwing behaviour of
				   Heap32Next() on WinCE.

				   based on patch in original message by Tanguy Fautré (2009/03/02)
			           Subject: RAND_poll() and CreateToolhelp32Snapshot() stability
			     */
				int ex_cnt_limit = 42; 
				do
					{
					RAND_add(&hlist, hlist.dwSize, 3);
					__try
						{
						ZeroMemory(&hentry, sizeof(HEAPENTRY32));
						hentry.dwSize = sizeof(HEAPENTRY32);
						if (heap_first(&hentry,
							hlist.th32ProcessID,
							hlist.th32HeapID))
							{
							int entrycnt = 80;
							do
								RAND_add(&hentry,
									hentry.dwSize, 5);
							while (heap_next(&hentry)
								&& (!good || (GetTickCount()-starttime)<MAXDELAY)
								&& --entrycnt > 0);
							}
						}
					__except (EXCEPTION_EXECUTE_HANDLER)
						{
							/* ignore access violations when walking the heap list */
							ex_cnt_limit--;
						}
					} while (heaplist_next(handle, &hlist) 
						&& (!good || (GetTickCount()-starttime)<MAXDELAY)
						&& ex_cnt_limit > 0);
				}

#else
			if (heaplist_first(handle, &hlist))
				{
				do
					{
					RAND_add(&hlist, hlist.dwSize, 3);
					hentry.dwSize = sizeof(HEAPENTRY32);
					if (heap_first(&hentry,
						hlist.th32ProcessID,
						hlist.th32HeapID))
						{
						int entrycnt = 80;
						do
							RAND_add(&hentry,
								hentry.dwSize, 5);
						while (heap_next(&hentry)
							&& --entrycnt > 0);
						}
					} while (heaplist_next(handle, &hlist) 
						&& (!good || (GetTickCount()-starttime)<MAXDELAY));
				}
#endif

			/* process walking */
                        /* PROCESSENTRY32 contains 9 fields that will change
                         * with each entry.  Consider each field a source of
                         * 1 byte of entropy.
                         */
			p.dwSize = sizeof(PROCESSENTRY32);
		
			if (good) starttime = GetTickCount();
			if (process_first(handle, &p))
				do
					RAND_add(&p, p.dwSize, 9);
				while (process_next(handle, &p) && (!good || (GetTickCount()-starttime)<MAXDELAY));

			/* thread walking */
                        /* THREADENTRY32 contains 6 fields that will change
                         * with each entry.  Consider each field a source of
                         * 1 byte of entropy.
                         */
			t.dwSize = sizeof(THREADENTRY32);
			if (good) starttime = GetTickCount();
			if (thread_first(handle, &t))
				do
					RAND_add(&t, t.dwSize, 6);
				while (thread_next(handle, &t) && (!good || (GetTickCount()-starttime)<MAXDELAY));

			/* module walking */
                        /* MODULEENTRY32 contains 9 fields that will change
                         * with each entry.  Consider each field a source of
                         * 1 byte of entropy.
                         */
			m.dwSize = sizeof(MODULEENTRY32);
			if (good) starttime = GetTickCount();
			if (module_first(handle, &m))
				do
					RAND_add(&m, m.dwSize, 9);
				while (module_next(handle, &m)
					       	&& (!good || (GetTickCount()-starttime)<MAXDELAY));
			if (close_snap)
				close_snap(handle);
			else
				CloseHandle(handle);

			}

		FreeLibrary(kernel);
		}
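
The Windows CE comment near the top of Example #5 suggests either redefining GetProcAddress as GetProcAddressA or writing a small shim that expands an ANSI symbol name to Unicode. A sketch of such a shim (the wrapper name is made up, and it assumes the wide GetProcAddressW described in that comment is what CE exports):

#if defined(OPENSSL_SYS_WINCE)
#include <windows.h>

/* Hypothetical shim: expand the ANSI symbol name to Unicode before
 * calling the wide-character GetProcAddressW available on Windows CE. */
static FARPROC GetProcAddress_ansi(HMODULE module, const char *name)
{
	WCHAR wname[128];

	if (MultiByteToWideChar(CP_ACP, 0, name, -1,
				wname, sizeof(wname) / sizeof(wname[0])) == 0)
		return NULL;
	return GetProcAddressW(module, wname);
}
#endif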
Example #6
0
/* colap_primero: return the first (highest-priority) element of the
 * priority queue ("cola de prioridad"). */
tcpalpha
colap_primero(const colap c) {
	return heap_first(c);
}
Example #7
0
/*
 * xstats_update_class_statistics () -  Updates the statistics for the objects
 *                                    of a given class
 *   return:
 *   class_id(in): Identifier of the class
 *
 * Note: It first retrieves the whole catalog information about this class,
 *       including all possible forms of disk representations for the instance
 *       objects. Then, it performs a complete pass on the heap file of the
 *       class, reading in all of the instance objects one by one and
 *       calculating the ranges of numeric attribute values (i.e. min. & max.
 *       values for each numeric attribute).
 *
 *       During this pass on the heap file, these values are maintained
 *       separately for objects with the same representation. Each minimum and
 *       maximum value is initialized when the first instance of the class
 *       with the corresponding representation is encountered. These values are
 *       continually updated as attribute values exceeding the known range are
 *       encountered. At the end of this pass, these individual ranges for
 *       each representation are uniformed in the last (the current)
 *       representation, building the global range values for the attributes
 *       of the class. Then, the btree statistical information is obtained for
 *       each attribute that is indexed and stored in this final representation
 *       structure. Finally, a new timestamp is obtained for these class
 *       statistics and they are stored to disk within the catalog structure
 *       for the last class representation.
 */
int
xstats_update_class_statistics (THREAD_ENTRY * thread_p, OID * class_id_p)
{
  CLS_INFO *cls_info_p = NULL;
  REPR_ID repr_id;
  DISK_REPR *disk_repr_p = NULL;
  DISK_ATTR *disk_attr_p = NULL;
  BTREE_STATS *btree_stats_p = NULL;
  HEAP_SCANCACHE hf_scan_cache, *hf_scan_cache_p = NULL;
  HEAP_CACHE_ATTRINFO hf_cache_attr_info, *hf_cache_attr_info_p = NULL;
  RECDES recdes;
  OID oid;
  SCAN_CODE scan_rc;
  DB_VALUE *db_value_p;
  DB_DATA *db_data_p;
  int i, j;

  cls_info_p = catalog_get_class_info (thread_p, class_id_p);
  if (cls_info_p == NULL)
    {
      goto error;
    }

  /* if class information was not obtained */
  if (cls_info_p->hfid.vfid.fileid < 0 || cls_info_p->hfid.vfid.volid < 0)
    {
      /* The class does not have a heap file (i.e. it has no instances);
         so no statistics can be obtained for this class; just set
         'tot_objects' field to 0 and return. */

      cls_info_p->tot_objects = 0;

      if (catalog_add_class_info (thread_p, class_id_p, cls_info_p) !=
	  NO_ERROR)
	{
	  goto error;
	}

      catalog_free_class_info (cls_info_p);
      return NO_ERROR;
    }

  if (catalog_get_last_representation_id (thread_p, class_id_p, &repr_id) !=
      NO_ERROR)
    {
      goto error;
    }

  disk_repr_p = catalog_get_representation (thread_p, class_id_p, repr_id);
  if (disk_repr_p == NULL)
    {
      goto error;
    }

  cls_info_p->tot_pages = file_get_numpages (thread_p,
					     &cls_info_p->hfid.vfid);
  cls_info_p->tot_objects = 0;
  disk_repr_p->num_objects = 0;

  /* scan whole object of the class and update the statistics */

  if (heap_scancache_start (thread_p, &hf_scan_cache, &(cls_info_p->hfid),
			    class_id_p, true, false,
			    LOCKHINT_NONE) != NO_ERROR)
    {
      goto error;
    }

  hf_scan_cache_p = &hf_scan_cache;

  if (heap_attrinfo_start (thread_p, class_id_p, -1, NULL,
			   &hf_cache_attr_info) != NO_ERROR)
    {
      goto error;
    }
  hf_cache_attr_info_p = &hf_cache_attr_info;

  /* Obtain minimum and maximum value of the instances for each attribute of
     the class and count the number of objects by scanning heap file */

  recdes.area_size = -1;
  scan_rc = heap_first (thread_p, &(cls_info_p->hfid), class_id_p, &oid,
			&recdes, hf_scan_cache_p, PEEK);

  while (scan_rc == S_SUCCESS)
    {
      if (heap_attrinfo_read_dbvalues (thread_p, &oid, &recdes,
				       hf_cache_attr_info_p) != NO_ERROR)
	{
	  scan_rc = S_ERROR;
	  break;
	}

      /* Consider only attributes whose type is fixed, because min/max
         statistics are useful only for those types when the query optimizer
         calculates the cost of a query plan. Variable-type attributes, for
         example VARCHAR(STRING), are assigned a constant selectivity. */

      for (i = 0; i < disk_repr_p->n_fixed; i++)
	{
	  disk_attr_p = &(disk_repr_p->fixed[i]);

	  db_value_p = heap_attrinfo_access (disk_attr_p->id,
					     hf_cache_attr_info_p);
	  if (db_value_p != NULL && db_value_is_null (db_value_p) != true)
	    {
	      db_data_p = db_value_get_db_data (db_value_p);

	      if (disk_repr_p->num_objects == 0)
		{
		  /* first object */
		  disk_attr_p->min_value = *db_data_p;
		  disk_attr_p->max_value = *db_data_p;
		}
	      else
		{
		  /* compare with previous values */
		  if (stats_compare_data (db_data_p, &disk_attr_p->min_value,
					  disk_attr_p->type) < 0)
		    {
		      disk_attr_p->min_value = *db_data_p;
		    }

		  if (stats_compare_data (db_data_p, &disk_attr_p->max_value,
					  disk_attr_p->type) > 0)
		    {
		      disk_attr_p->max_value = *db_data_p;
		    }
		}
	    }
	}

      cls_info_p->tot_objects++;
      disk_repr_p->num_objects++;

      scan_rc = heap_next (thread_p, &(cls_info_p->hfid), class_id_p, &oid,
			   &recdes, hf_scan_cache_p, PEEK);
    }

  if (scan_rc == S_ERROR)
    {
      goto error;
    }

  heap_attrinfo_end (thread_p, hf_cache_attr_info_p);
  if (heap_scancache_end (thread_p, hf_scan_cache_p) != NO_ERROR)
    {
      goto error;
    }

  /* update the index statistics for each attribute */

  for (i = 0; i < disk_repr_p->n_fixed + disk_repr_p->n_variable; i++)
    {
      if (i < disk_repr_p->n_fixed)
	{
	  disk_attr_p = disk_repr_p->fixed + i;
	}
      else
	{
	  disk_attr_p = disk_repr_p->variable + (i - disk_repr_p->n_fixed);
	}

      for (j = 0, btree_stats_p = disk_attr_p->bt_stats;
	   j < disk_attr_p->n_btstats; j++, btree_stats_p++)
	{
	  if (btree_get_stats (thread_p, &btree_stats_p->btid, btree_stats_p,
			       true) != NO_ERROR)
	    {
	      goto error;
	    }
	}
    }

  /* replace the current disk representation structure/information in the
     catalog with the newly computed statistics */

  if (catalog_add_representation (thread_p, class_id_p, repr_id, disk_repr_p)
      != NO_ERROR)
    {
      goto error;
    }

  cls_info_p->time_stamp = stats_get_time_stamp ();

  if (catalog_add_class_info (thread_p, class_id_p, cls_info_p) != NO_ERROR)
    {
      goto error;
    }

  if (disk_repr_p)
    {
      catalog_free_representation (disk_repr_p);
    }

  if (cls_info_p)
    {
      catalog_free_class_info (cls_info_p);
    }

  return NO_ERROR;

error:
  if (hf_cache_attr_info_p)
    {
      heap_attrinfo_end (thread_p, hf_cache_attr_info_p);
    }

  if (hf_scan_cache_p)
    {
      (void) heap_scancache_end (thread_p, hf_scan_cache_p);
    }

  if (disk_repr_p)
    {
      catalog_free_representation (disk_repr_p);
    }

  if (cls_info_p)
    {
      catalog_free_class_info (cls_info_p);
    }

  return er_errid ();
}
Example #8
0
int RAND_poll(void)
{
	MEMORYSTATUS m;
	HCRYPTPROV hProvider = 0;
	BYTE buf[64];
	DWORD w;
	HWND h;

	HMODULE advapi, kernel, user, netapi;
	CRYPTACQUIRECONTEXT acquire = 0;
	CRYPTGENRANDOM gen = 0;
	CRYPTRELEASECONTEXT release = 0;
#if 1 /* There was previously a problem with NETSTATGET.  Currently, this
       * section is still experimental, but if all goes well, this conditional
       * will be removed
       */
	NETSTATGET netstatget = 0;
	NETFREE netfree = 0;
#endif /* 1 */

	/* Determine the OS version we are on so we can turn off things 
	 * that do not work properly.
	 */
	OSVERSIONINFO osverinfo;
	osverinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
	GetVersionEx(&osverinfo);

	/* load functions dynamically - not available on all systems */
	advapi = LoadLibrary("ADVAPI32.DLL");
	kernel = LoadLibrary("KERNEL32.DLL");
	user = LoadLibrary("USER32.DLL");
	netapi = LoadLibrary("NETAPI32.DLL");

#if 1 /* There was previously a problem with NETSTATGET.  Currently, this
       * section is still experimental, but if all goes well, this conditional
       * will be removed
       */
	if (netapi)
		{
		netstatget = (NETSTATGET) GetProcAddress(netapi,"NetStatisticsGet");
		netfree = (NETFREE) GetProcAddress(netapi,"NetApiBufferFree");
		}

	if (netstatget && netfree)
		{
		LPBYTE outbuf;
		/* NetStatisticsGet() is a Unicode only function
 		 * STAT_WORKSTATION_0 contains 45 fields and STAT_SERVER_0
		 * contains 17 fields.  We treat each field as a source of
		 * one byte of entropy.
                 */

		if (netstatget(NULL, L"LanmanWorkstation", 0, 0, &outbuf) == 0)
			{
			RAND_add(outbuf, sizeof(STAT_WORKSTATION_0), 45);
			netfree(outbuf);
			}
		if (netstatget(NULL, L"LanmanServer", 0, 0, &outbuf) == 0)
			{
			RAND_add(outbuf, sizeof(STAT_SERVER_0), 17);
			netfree(outbuf);
			}
		}

	if (netapi)
		FreeLibrary(netapi);
#endif /* 1 */
 
        /* It appears like this can cause an exception deep within ADVAPI32.DLL
         * at random times on Windows 2000.  Reported by Jeffrey Altman.  
         * Only use it on NT.
	 */
        if ( osverinfo.dwPlatformId == VER_PLATFORM_WIN32_NT &&
		osverinfo.dwMajorVersion < 5)
		{
		/* Read Performance Statistics from NT/2000 registry
		 * The size of the performance data can vary from call
		 * to call so we must guess the size of the buffer to use
		 * and increase its size if we get an ERROR_MORE_DATA
		 * return instead of ERROR_SUCCESS.
		 */
		LONG   rc=ERROR_MORE_DATA;
		char * buf=NULL;
		DWORD bufsz=0;
		DWORD length;

		while (rc == ERROR_MORE_DATA)
			{
			buf = realloc(buf,bufsz+8192);
			if (!buf)
				break;
			bufsz += 8192;

			length = bufsz;
			rc = RegQueryValueEx(HKEY_PERFORMANCE_DATA, "Global",
				NULL, NULL, buf, &length);
			}
		if (rc == ERROR_SUCCESS)
			{
                        /* For entropy count assume only least significant
			 * byte of each DWORD is random.
                         */
			RAND_add(&length, sizeof(length), 0);
			RAND_add(buf, length, length / 4.0);
			}
		if (buf)
			free(buf);
		}

	if (advapi)
		{
		acquire = (CRYPTACQUIRECONTEXT) GetProcAddress(advapi,
			"CryptAcquireContextA");
		gen = (CRYPTGENRANDOM) GetProcAddress(advapi,
			"CryptGenRandom");
		release = (CRYPTRELEASECONTEXT) GetProcAddress(advapi,
			"CryptReleaseContext");
		}

	if (acquire && gen && release)
		{
		/* poll the CryptoAPI PRNG */
                /* The CryptoAPI returns sizeof(buf) bytes of randomness */
		if (acquire(&hProvider, 0, 0, PROV_RSA_FULL,
			CRYPT_VERIFYCONTEXT))
			{
			if (gen(hProvider, sizeof(buf), buf) != 0)
				{
				RAND_add(buf, sizeof(buf), sizeof(buf));
#if 0
				printf("randomness from PROV_RSA_FULL\n");
#endif
				}
			release(hProvider, 0); 
			}
		
		/* poll the Pentium PRG with CryptoAPI */
		if (acquire(&hProvider, 0, INTEL_DEF_PROV, PROV_INTEL_SEC, 0))
			{
			if (gen(hProvider, sizeof(buf), buf) != 0)
				{
				RAND_add(buf, sizeof(buf), sizeof(buf));
#if 0
				printf("randomness from PROV_INTEL_SEC\n");
#endif
				}
			release(hProvider, 0);
			}
		}

        if (advapi)
		FreeLibrary(advapi);

	/* timer data */
	readtimer();
	
	/* memory usage statistics */
	GlobalMemoryStatus(&m);
	RAND_add(&m, sizeof(m), 1);

	/* process ID */
	w = GetCurrentProcessId();
	RAND_add(&w, sizeof(w), 1);

	if (user)
		{
		GETCURSORINFO cursor;
		GETFOREGROUNDWINDOW win;
		GETQUEUESTATUS queue;

		win = (GETFOREGROUNDWINDOW) GetProcAddress(user, "GetForegroundWindow");
		cursor = (GETCURSORINFO) GetProcAddress(user, "GetCursorInfo");
		queue = (GETQUEUESTATUS) GetProcAddress(user, "GetQueueStatus");

		if (win)
			{
			/* window handle */
			h = win();
			RAND_add(&h, sizeof(h), 0);
			}
		if (cursor)
			{
			/* unfortunately, it's not safe to call GetCursorInfo()
			 * on NT4 even though it exists in SP3 (or SP6) and
			 * higher.
			 */
			if ( osverinfo.dwPlatformId == VER_PLATFORM_WIN32_NT &&
				osverinfo.dwMajorVersion < 5)
				cursor = 0;
			}
		if (cursor)
			{
			/* cursor position */
                        /* assume 2 bytes of entropy */
			CURSORINFO ci;
			ci.cbSize = sizeof(CURSORINFO);
			if (cursor(&ci))
				RAND_add(&ci, ci.cbSize, 2);
			}

		if (queue)
			{
			/* message queue status */
                        /* assume 1 byte of entropy */
			w = queue(QS_ALLEVENTS);
			RAND_add(&w, sizeof(w), 1);
			}

		FreeLibrary(user);
		}

	/* Toolhelp32 snapshot: enumerate processes, threads, modules and heap
	 * http://msdn.microsoft.com/library/psdk/winbase/toolhelp_5pfd.htm
	 * (Win 9x and 2000 only, not available on NT)
	 *
	 * This seeding method was proposed in Peter Gutmann, Software
	 * Generation of Practically Strong Random Numbers,
	 * http://www.usenix.org/publications/library/proceedings/sec98/gutmann.html
	 * revised version at http://www.cryptoengines.com/~peter/06_random.pdf
	 * (The assignment of entropy estimates below is arbitrary, but based
	 * on Peter's analysis the full poll appears to be safe. Additional
	 * interactive seeding is encouraged.)
	 */

	if (kernel)
		{
		CREATETOOLHELP32SNAPSHOT snap;
		HANDLE handle;

		HEAP32FIRST heap_first;
		HEAP32NEXT heap_next;
		HEAP32LIST heaplist_first, heaplist_next;
		PROCESS32 process_first, process_next;
		THREAD32 thread_first, thread_next;
		MODULE32 module_first, module_next;

		HEAPLIST32 hlist;
		HEAPENTRY32 hentry;
		PROCESSENTRY32 p;
		THREADENTRY32 t;
		MODULEENTRY32 m;

		snap = (CREATETOOLHELP32SNAPSHOT)
			GetProcAddress(kernel, "CreateToolhelp32Snapshot");
		heap_first = (HEAP32FIRST) GetProcAddress(kernel, "Heap32First");
		heap_next = (HEAP32NEXT) GetProcAddress(kernel, "Heap32Next");
		heaplist_first = (HEAP32LIST) GetProcAddress(kernel, "Heap32ListFirst");
		heaplist_next = (HEAP32LIST) GetProcAddress(kernel, "Heap32ListNext");
		process_first = (PROCESS32) GetProcAddress(kernel, "Process32First");
		process_next = (PROCESS32) GetProcAddress(kernel, "Process32Next");
		thread_first = (THREAD32) GetProcAddress(kernel, "Thread32First");
		thread_next = (THREAD32) GetProcAddress(kernel, "Thread32Next");
		module_first = (MODULE32) GetProcAddress(kernel, "Module32First");
		module_next = (MODULE32) GetProcAddress(kernel, "Module32Next");

		if (snap && heap_first && heap_next && heaplist_first &&
			heaplist_next && process_first && process_next &&
			thread_first && thread_next && module_first &&
			module_next && (handle = snap(TH32CS_SNAPALL,0))
			!= NULL)
			{
			/* heap list and heap walking */
                        /* HEAPLIST32 contains 3 fields that will change with
                         * each entry.  Consider each field a source of 1 byte
                         * of entropy.
                         * HEAPENTRY32 contains 5 fields that will change with 
                         * each entry.  Consider each field a source of 1 byte
                         * of entropy.
                         */
			hlist.dwSize = sizeof(HEAPLIST32);		
			if (heaplist_first(handle, &hlist))
				do
					{
					RAND_add(&hlist, hlist.dwSize, 3);
					hentry.dwSize = sizeof(HEAPENTRY32);
					if (heap_first(&hentry,
						hlist.th32ProcessID,
						hlist.th32HeapID))
						{
						int entrycnt = 50;
						do
							RAND_add(&hentry,
								hentry.dwSize, 5);
						while (heap_next(&hentry)
							&& --entrycnt > 0);
						}
					} while (heaplist_next(handle,
						&hlist));
			
			/* process walking */
                        /* PROCESSENTRY32 contains 9 fields that will change
                         * with each entry.  Consider each field a source of
                         * 1 byte of entropy.
                         */
			p.dwSize = sizeof(PROCESSENTRY32);
			if (process_first(handle, &p))
				do
					RAND_add(&p, p.dwSize, 9);
				while (process_next(handle, &p));

			/* thread walking */
                        /* THREADENTRY32 contains 6 fields that will change
                         * with each entry.  Consider each field a source of
                         * 1 byte of entropy.
                         */
			t.dwSize = sizeof(THREADENTRY32);
			if (thread_first(handle, &t))
				do
					RAND_add(&t, t.dwSize, 6);
				while (thread_next(handle, &t));

			/* module walking */
                        /* MODULEENTRY32 contains 9 fields that will change
                         * with each entry.  Consider each field a source of
                         * 1 byte of entropy.
                         */
			m.dwSize = sizeof(MODULEENTRY32);
			if (module_first(handle, &m))
				do
					RAND_add(&m, m.dwSize, 9);
				while (module_next(handle, &m));

			CloseHandle(handle);
			}

		FreeLibrary(kernel);
		}

#if 0
	printf("Exiting RAND_poll\n");
#endif

	return(1);
}
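
Example #8 also mixes in "timer data" via readtimer(), which is not shown here. A hedged sketch of what such a timer-based entropy read could look like (not OpenSSL's actual readtimer(); the fallback choice and the zero entropy credit are assumptions):

#include <windows.h>
#include <openssl/rand.h>

static void readtimer_sketch(void)
{
	LARGE_INTEGER counter;
	DWORD ticks;

	/* High-resolution counter if available, plus the millisecond tick
	 * count; neither is credited with any entropy. */
	if (QueryPerformanceCounter(&counter))
		RAND_add(&counter, sizeof(counter), 0);
	ticks = GetTickCount();
	RAND_add(&ticks, sizeof(ticks), 0);
}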
Example #9
0
void evg_faults_insert(void)
{
	struct evg_fault_t *fault;
	struct evg_compute_unit_t *compute_unit;

	for (;;)
	{
		linked_list_head(evg_fault_list);
		fault = linked_list_get(evg_fault_list);
		if (!fault || fault->cycle > evg_gpu->cycle)
			break;

		/* Insert fault depending on fault type */
		switch (fault->type)
		{

		case evg_fault_ams:
		{
			struct evg_work_group_t *work_group;
			struct evg_wavefront_t *wavefront;
			struct evg_work_item_t *work_item;

			int work_group_id;  /* in compute unit */
			int wavefront_id;  /* in compute unit */
			int value;

			/* Initial debug */
			evg_faults_debug("fault clk=%lld cu=%d type=\"ams\" stack=%d am=%d bit=%d ",
				evg_gpu->cycle,
				fault->compute_unit_id, fault->stack_id,
				fault->active_mask_id, fault->bit);
			assert(fault->cycle == evg_gpu->cycle);
			compute_unit = evg_gpu->compute_units[fault->compute_unit_id];

			/* If compute unit is idle, dismiss */
			if (!compute_unit->work_group_count)
			{
				evg_faults_debug("effect=\"cu_idle\"");
				goto end_loop;
			}

			/* Get work-group and wavefront. If wavefront ID exceeds current number, dismiss */
			work_group_id = fault->stack_id / evg_gpu->ndrange->wavefronts_per_work_group;
			wavefront_id = fault->stack_id % evg_gpu->ndrange->wavefronts_per_work_group;
			if (work_group_id >= evg_gpu_max_work_groups_per_compute_unit
				|| !compute_unit->work_groups[work_group_id])
			{
				evg_faults_debug("effect=\"wf_idle\"");
				goto end_loop;
			}
			work_group = compute_unit->work_groups[work_group_id];
			wavefront = work_group->wavefronts[wavefront_id];

			/* If active_mask_id exceeds stack top, dismiss */
			if (fault->active_mask_id > wavefront->stack_top)
			{
				evg_faults_debug("effect=\"am_idle\"");
				goto end_loop;
			}

			/* If 'bit' exceeds number of work-items in wavefront, dismiss */
			if (fault->bit >= wavefront->work_item_count)
			{
				evg_faults_debug("effect=\"wi_idle\"");
				goto end_loop;
			}

			/* Fault caused an error, show affected software entities */
			work_item = wavefront->work_items[fault->bit];
			evg_faults_debug("effect=\"error\" wg=%d wf=%d wi=%d",
				work_group->id,
				wavefront->id,
				work_item->id);

			/* Inject fault */
			value = bit_map_get(wavefront->active_stack,
				fault->active_mask_id * wavefront->work_item_count
				+ fault->bit, 1);
			bit_map_set(wavefront->active_stack,
				fault->active_mask_id * wavefront->work_item_count
				+ fault->bit, 1, !value);
			evg_fault_errors++;

			break;
		}

		case evg_fault_reg:
		{
			struct evg_opencl_kernel_t *kernel = evg_gpu->ndrange->kernel;

			int work_group_id_in_compute_unit;
			struct evg_work_group_t *work_group;
			struct evg_wavefront_t *wavefront;

			int num_registers_per_work_group;

			int work_item_id_in_compute_unit;
			int work_item_id_in_work_group;
			struct evg_work_item_t *work_item;

			struct linked_list_t *fetch_queue;
			struct evg_uop_t *inst_buffer;
			struct evg_uop_t *exec_buffer;
			struct heap_t *event_queue;
			struct evg_uop_t *uop;

			int lo_reg;

			/* Initial debug */
			evg_faults_debug("fault clk=%lld cu=%d type=\"reg\" reg=%d bit=%d ",
				evg_gpu->cycle,
				fault->compute_unit_id,
				fault->reg_id,
				fault->bit);
			assert(fault->cycle == evg_gpu->cycle);
			compute_unit = evg_gpu->compute_units[fault->compute_unit_id];

			/* If compute unit is idle, dismiss */
			if (!compute_unit->work_group_count)
			{
				evg_faults_debug("effect=\"cu_idle\"");
				goto end_loop;
			}

			/* Get work-group */
			num_registers_per_work_group = kernel->bin_file->enc_dict_entry_evergreen->num_gpr_used
				* kernel->local_size;
			work_group_id_in_compute_unit = fault->reg_id / num_registers_per_work_group;
			if (work_group_id_in_compute_unit >= evg_gpu_max_work_groups_per_compute_unit)
			{
				evg_faults_debug("effect=\"reg_idle\"");
				goto end_loop;
			}

			/* Get work-group (again) */
			work_group = compute_unit->work_groups[work_group_id_in_compute_unit];
			if (!work_group)
			{
				evg_faults_debug("effect=\"reg_idle\"");
				goto end_loop;
			}

			/* Get affected entities */
			work_item_id_in_compute_unit = fault->reg_id
				/ kernel->bin_file->enc_dict_entry_evergreen->num_gpr_used;
			work_item_id_in_work_group = work_item_id_in_compute_unit % kernel->local_size;
			work_item = work_group->work_items[work_item_id_in_work_group];
			wavefront = work_item->wavefront;
			lo_reg = fault->reg_id % kernel->bin_file->enc_dict_entry_evergreen->num_gpr_used;

			/* Fault falling between Fetch and Read stage of an instruction
			 * consuming register. This case cannot be modeled due to functional
			 * simulation skew. */
			fetch_queue = compute_unit->alu_engine.fetch_queue;
			inst_buffer = compute_unit->alu_engine.inst_buffer;
			for (linked_list_head(fetch_queue); !linked_list_is_end(fetch_queue);
				linked_list_next(fetch_queue))
			{
				uop = linked_list_get(fetch_queue);
				if (evg_stack_faults_is_idep(uop, wavefront, lo_reg))
				{
					evg_faults_debug("effect=\"reg_read\"");
					goto end_loop;
				}
			}
			uop = inst_buffer;
			if (uop && evg_stack_faults_is_idep(uop, wavefront, lo_reg))
			{
				evg_faults_debug("effect=\"reg_read\"");
				goto end_loop;
			}

			/* Fault falling between Fetch and Write stage of an instruction
			 * writing on the register. The instruction will overwrite the fault,
			 * so this shouldn't cause its injection. */
			exec_buffer = compute_unit->alu_engine.exec_buffer;
			for (linked_list_head(fetch_queue); !linked_list_is_end(fetch_queue);
				linked_list_next(fetch_queue))
			{
				uop = linked_list_get(fetch_queue);
				if (evg_stack_faults_is_odep(uop, wavefront, lo_reg))
				{
					evg_faults_debug("effect=\"reg_write\"");
					goto end_loop;
				}
			}
			uop = inst_buffer;
			if (uop && evg_stack_faults_is_odep(uop, wavefront, lo_reg))
			{
				evg_faults_debug("effect=\"reg_write\"");
				goto end_loop;
			}
			uop = exec_buffer;
			if (uop && evg_stack_faults_is_odep(uop, wavefront, lo_reg))
			{
				evg_faults_debug("effect=\"reg_write\"");
				goto end_loop;
			}
			event_queue = compute_unit->alu_engine.event_queue;
			for (heap_first(event_queue, (void **) &uop); uop;
				heap_next(event_queue, (void **) &uop))
			{
				if (evg_stack_faults_is_odep(uop, wavefront, lo_reg))
				{
					evg_faults_debug("effect=\"reg_write\"");
					goto end_loop;
				}
			}

			/* Fault caused error */
			evg_faults_debug("effect=\"error\" ");
			evg_faults_debug("wg=%d wf=%d wi=%d lo_reg=%d ",
				work_group->id, work_item->wavefront->id, work_item->id, lo_reg);

			/* Insert the fault */
			if (fault->bit < 32)
				work_item->gpr[lo_reg].elem[0] ^= 1 << fault->bit;
			else if (fault->bit < 64)
				work_item->gpr[lo_reg].elem[1] ^= 1 << (fault->bit - 32);
			else if (fault->bit < 96)
				work_item->gpr[lo_reg].elem[2] ^= 1 << (fault->bit - 64);
			else
				work_item->gpr[lo_reg].elem[3] ^= 1 << (fault->bit - 96);
			evg_fault_errors++;

			break;

		}

		case evg_fault_mem:
		{
			struct evg_work_group_t *work_group;

			int work_group_id_in_compute_unit;
			unsigned char value;

			/* Initial debug */
			evg_faults_debug("fault clk=%lld cu=%d type=\"mem\" byte=%d bit=%d ",
				evg_gpu->cycle,
				fault->compute_unit_id,
				fault->byte,
				fault->bit);
			assert(fault->cycle == evg_gpu->cycle);
			compute_unit = evg_gpu->compute_units[fault->compute_unit_id];

			/* If compute unit is idle, dismiss */
			if (!compute_unit->work_group_count)
			{
				evg_faults_debug("effect=\"cu_idle\"");
				goto end_loop;
			}

			/* Check if there is any local memory used at all */
			if (!evg_gpu->ndrange->local_mem_top)
			{
				evg_faults_debug("effect=\"mem_idle\"");
				goto end_loop;
			}

			/* Get work-group */
			work_group_id_in_compute_unit = fault->byte / evg_gpu->ndrange->local_mem_top;
			if (work_group_id_in_compute_unit >= evg_gpu_max_work_groups_per_compute_unit)
			{
				evg_faults_debug("effect=\"mem_idle\"");
				goto end_loop;
			}

			/* Get work-group (again) */
			work_group = compute_unit->work_groups[work_group_id_in_compute_unit];
			if (!work_group)
			{
				evg_faults_debug("effect=\"mem_idle\"");
				goto end_loop;
			}

			/* Inject fault */
			evg_faults_debug("effect=\"error\" wg=%d ",
				work_group->id);
			mem_read(work_group->local_mem, fault->byte, 1, &value);
			value ^= 1 << fault->bit;
			mem_write(work_group->local_mem, fault->byte, 1, &value);
			evg_fault_errors++;

			break;

		}

		default:
			panic("invalid fault type");

		}

end_loop:
		/* Remove the fault from the list and free it */
		linked_list_remove(evg_fault_list);
		free(fault);
		evg_faults_debug("\n");

		/* If all faults were inserted and no error was caused, end simulation */
		if (!linked_list_count(evg_fault_list) && !evg_fault_errors)
			esim_finish = esim_finish_evg_no_faults;
	}
}
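
The register fault in Example #9 flips one bit of a 128-bit GPR stored as four 32-bit elements; the if/else chain just maps a flat bit index to an element and a bit position. A self-contained illustration of that arithmetic (the values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t elem[4] = {0, 0, 0, 0};
	int bit = 70;                        /* example fault bit, 0..127 */

	/* Same effect as the if/else chain: bit 70 lands in elem[2], bit 6. */
	elem[bit / 32] ^= 1u << (bit % 32);
	printf("bit %d -> elem[%d] = 0x%08x\n", bit, bit / 32, elem[bit / 32]);
	return 0;
}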