Example #1
__inline void destroy_left(){
	double ticks;
	GLCD_SetTextColor(LightGrey);
	GLCD_Bargraph (current_block.x_offset, current_block.y_offset, abs(diff), HEIGHT, 1024);
				
	ticks = os_time_get();
	while(os_time_get() - ticks < ERASE_TIME);

	GLCD_SetTextColor(White);
	GLCD_Bargraph (current_block.x_offset, current_block.y_offset, abs(diff), HEIGHT, 1024);
}
Example #2
File: main.c  Project: mihai93/MTE-241
__task void base_task( void ) {
	array_t array;
	// Set the priority of the base task
	//  - lowest priority:     1
	//  - highest priority:  254

	os_tsk_prio_self( 10 );

	while ( 1 ) {
		array = generate_array();

		time = os_time_get();

		// Sort array
		#ifdef TEST_SEMAPHORE_IMPLEMENTATION
			quicksort_sem( array );
		#else
			quicksort( array );
		#endif

		time = os_time_get() - time;

		if( is_sorted_array( &array ) )
			printf("The array is sucessfully sorted in %.1f us.\n", time);
		//else
		//	printf("The array is not sorted!\n");

		#ifdef PRINT_ARRAY
			print_array( &array );
		#endif
	}
}
Example #3
static boolean r300_fence_finish(struct pipe_screen *screen,
                                 struct pipe_fence_handle *fence,
                                 uint64_t timeout)
{
    struct radeon_winsys *rws = r300_screen(screen)->rws;
    struct pb_buffer *rfence = (struct pb_buffer*)fence;

    if (timeout != PIPE_TIMEOUT_INFINITE) {
        int64_t start_time = os_time_get();

        /* Convert to microseconds. */
        timeout /= 1000;

        /* Wait in a loop. */
        while (rws->buffer_is_busy(rfence, RADEON_USAGE_READWRITE)) {
            if (os_time_get() - start_time >= timeout) {
                return FALSE;
            }
            os_time_sleep(10);
        }
        return TRUE;
    }

    rws->buffer_wait(rfence, RADEON_USAGE_READWRITE);
    return TRUE;
}
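r300_fence_finish() above shows the usual poll-with-timeout idiom built on os_time_get(): take a start timestamp, poll the condition, and give up once the elapsed microseconds exceed the timeout. Below is a minimal sketch of that idiom, assuming only that os_time_get() and os_time_sleep() behave as they do in the Gallium examples here; the wait_with_timeout() wrapper and its busy() callback are hypothetical, not part of any of the projects quoted above.

#include <stdbool.h>
#include <stdint.h>

/* Assumed: monotonic clock in microseconds, as in the Gallium code above. */
extern int64_t os_time_get(void);
extern void os_time_sleep(int64_t usecs);

/* Poll busy() until it returns false or timeout_us microseconds elapse.
 * Returns true if the condition cleared, false on timeout. */
static bool
wait_with_timeout(bool (*busy)(void), int64_t timeout_us)
{
    int64_t start = os_time_get();

    while (busy()) {
        if (os_time_get() - start >= timeout_us)
            return false;            /* timed out */
        os_time_sleep(10);           /* brief back-off between polls */
    }
    return true;
}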
/**
 * Update fragment state.  This is called just prior to drawing
 * something when some fragment-related state has changed.
 */
void 
llvmpipe_update_fs(struct llvmpipe_context *lp)
{
   struct lp_fragment_shader *shader = lp->fs;
   struct lp_fragment_shader_variant_key key;
   struct lp_fragment_shader_variant *variant = NULL;
   struct lp_fs_variant_list_item *li;

   make_variant_key(lp, shader, &key);

   li = first_elem(&shader->variants);
   while(!at_end(&shader->variants, li)) {
      if(memcmp(&li->base->key, &key, shader->variant_key_size) == 0) {
         variant = li->base;
         break;
      }
      li = next_elem(li);
   }

   if (variant) {
      move_to_head(&lp->fs_variants_list, &variant->list_item_global);
   }
   else {
      int64_t t0, t1;
      int64_t dt;
      unsigned i;
      if (lp->nr_fs_variants >= LP_MAX_SHADER_VARIANTS) {
         struct pipe_context *pipe = &lp->pipe;

         /*
          * XXX: we need to flush the context until we have some sort of reference
          * counting in fragment shaders as they may still be binned
          * Flushing alone might not be sufficient we need to wait on it too.
          */
         llvmpipe_finish(pipe, __FUNCTION__);

         for (i = 0; i < LP_MAX_SHADER_VARIANTS / 4; i++) {
            struct lp_fs_variant_list_item *item = last_elem(&lp->fs_variants_list);
            remove_shader_variant(lp, item->base);
         }
      }
      t0 = os_time_get();

      variant = generate_variant(lp, shader, &key);

      t1 = os_time_get();
      dt = t1 - t0;
      LP_COUNT_ADD(llvm_compile_time, dt);
      LP_COUNT_ADD(nr_llvm_compiles, 2);  /* emit vs. omit in/out test */

      if (variant) {
         insert_at_head(&shader->variants, &variant->list_item_local);
         insert_at_head(&lp->fs_variants_list, &variant->list_item_global);
         lp->nr_fs_variants++;
         shader->variants_cached++;
      }
   }

   lp_setup_set_fs_variant(lp->setup, variant);
}
Example #5
void
bletest_execute_initiator(void)
{
    int rc;
    uint16_t handle;

    /* 
     * Determine if there is an active connection for the current handle
     * we are trying to create. If so, start looking for the next one
     */
    if (g_bletest_current_conns < BLETEST_CFG_CONCURRENT_CONNS) {
        handle = g_bletest_current_conns + 1;
        if (ble_ll_conn_find_active_conn(handle)) {
            /* Set LED to slower blink rate */
            g_bletest_led_rate = OS_TICKS_PER_SEC;

            /* Set next os time to start the connection update */
            g_next_os_time = 0;

            /* Ask for version information */
            rc = host_hci_cmd_rd_rem_version(handle);
            assert(rc == 0);
            host_hci_outstanding_opcode = 0;

            /* Scanning better be stopped! */
            assert(ble_ll_scan_enabled() == 0);

            /* Add to current connections */
            ++g_bletest_current_conns;

            /* Move to next connection */
            if (g_bletest_current_conns < BLETEST_CFG_CONCURRENT_CONNS) {
                /* restart initiating */
                g_bletest_cur_peer_addr[5] += 1;
                g_dev_addr[5] += 1;
                bletest_init_initiator();
            }
        }
    } else {
        /* Issue a connection parameter update to connection handle 1 */
        if (g_next_os_time == 0) {
            g_next_os_time = os_time_get();
            g_next_os_time += OS_TICKS_PER_SEC * 5;
        } else {
            if (g_next_os_time != 0xffffffff) {
#if 0
                if ((int32_t)(os_time_get() - g_next_os_time) >= 0) {
                    bletest_send_conn_update(1);
                    g_next_os_time = 0xffffffff;
                }
#else
                g_next_os_time = 0xffffffff;
#endif
            }
        }
    }
}
Example #6
void r300_flush(struct pipe_context *pipe,
                unsigned flags,
                struct pipe_fence_handle **fence)
{
    struct r300_context *r300 = r300_context(pipe);

    if (r300->screen->info.drm_minor >= 12) {
        flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
    }

    if (r300->dirty_hw) {
        r300_flush_and_cleanup(r300, flags, fence);
    } else {
        if (fence) {
            /* We have to create a fence object, but the command stream is empty
             * and we cannot emit an empty CS. Let's write to some reg. */
            CS_LOCALS(r300);
            OUT_CS_REG(RB3D_COLOR_CHANNEL_MASK, 0);
            r300->rws->cs_flush(r300->cs, flags, fence, 0);
        } else {
            /* Even if hw is not dirty, we should at least reset the CS in case
             * the space checking failed for the first draw operation. */
            r300->rws->cs_flush(r300->cs, flags, NULL, 0);
        }
    }

    /* Update Hyper-Z status. */
    if (r300->hyperz_enabled) {
        /* If there was a Z clear, keep Hyper-Z access. */
        if (r300->num_z_clears) {
            r300->hyperz_time_of_last_flush = os_time_get();
            r300->num_z_clears = 0;
        } else if (os_time_get() - r300->hyperz_time_of_last_flush > 2000000) {
            /* If there hasn't been a Z clear for 2 seconds, revoke Hyper-Z access. */
            r300->hiz_in_use = FALSE;

            /* Decompress the Z buffer. */
            if (r300->zmask_in_use) {
                if (r300->locked_zbuffer) {
                    r300_decompress_zmask_locked(r300);
                } else {
                    r300_decompress_zmask(r300);
                }

                if (fence && *fence)
                    r300->rws->fence_reference(fence, NULL);
                r300_flush_and_cleanup(r300, flags, fence);
            }

            /* Revoke Hyper-Z access, so that some other process can take it. */
            r300->rws->cs_request_feature(r300->cs, RADEON_FID_R300_HYPERZ_ACCESS,
                                          FALSE);
            r300->hyperz_enabled = FALSE;
        }
    }
}
Example #7
void svga_context_flush( struct svga_context *svga, 
                         struct pipe_fence_handle **pfence )
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct pipe_fence_handle *fence = NULL;
   uint64_t t0;

   svga->curr.nr_fbs = 0;

   /* Ensure that texture dma uploads are processed
    * before submitting commands.
    */
   svga_context_flush_buffers(svga);

   svga->hud.command_buffer_size +=
      svga->swc->get_command_buffer_size(svga->swc);

   /* Flush pending commands to hardware:
    */
   t0 = os_time_get();
   svga->swc->flush(svga->swc, &fence);
   svga->hud.flush_time += (os_time_get() - t0);

   svga->hud.num_flushes++;

   svga_screen_cache_flush(svgascreen, fence);

   /* To force the re-emission of rendertargets and texture sampler bindings on
    * the next command buffer.
    */
   svga->rebind.flags.rendertargets = TRUE;
   svga->rebind.flags.texture_samplers = TRUE;

   if (svga_have_gb_objects(svga)) {

      svga->rebind.flags.constbufs = TRUE;
      svga->rebind.flags.vs = TRUE;
      svga->rebind.flags.fs = TRUE;
      svga->rebind.flags.gs = TRUE;

      if (svga_need_to_rebind_resources(svga)) {
         svga->rebind.flags.query = TRUE;
      }
   }

   if (SVGA_DEBUG & DEBUG_SYNC) {
      if (fence)
         svga->pipe.screen->fence_finish( svga->pipe.screen, fence,
                                          PIPE_TIMEOUT_INFINITE);
   }

   if (pfence)
      svgascreen->sws->fence_reference(svgascreen->sws, pfence, fence);

   svgascreen->sws->fence_reference(svgascreen->sws, &fence, NULL);
}
Example #8
__inline void destroy_right(){
	double ticks;
	GLCD_SetTextColor(LightGrey);
	GLCD_Bargraph (x_offsets[count - 1] + widths[count - 1], current_block.y_offset, diff, HEIGHT, 1024);
	
	ticks = os_time_get();
	while(os_time_get() - ticks < ERASE_TIME);
	
	GLCD_SetTextColor(White);
	GLCD_Bargraph (x_offsets[count - 1] + widths[count - 1], current_block.y_offset, diff, HEIGHT, 1024);
}
Example #9
/**
 * Update fragment state.  This is called just prior to drawing
 * something when some fragment-related state has changed.
 */
void 
llvmpipe_update_fs(struct llvmpipe_context *lp)
{
   struct lp_fragment_shader *shader = lp->fs;
   struct lp_fragment_shader_variant_key key;
   struct lp_fragment_shader_variant *variant;
   boolean opaque;

   make_variant_key(lp, shader, &key);

   variant = shader->variants;
   while(variant) {
      if(memcmp(&variant->key, &key, sizeof key) == 0)
         break;

      variant = variant->next;
   }

   if (!variant) {
      int64_t t0, t1;
      int64_t dt;
      t0 = os_time_get();

      variant = generate_variant(lp, shader, &key);

      t1 = os_time_get();
      dt = t1 - t0;
      LP_COUNT_ADD(llvm_compile_time, dt);
      LP_COUNT_ADD(nr_llvm_compiles, 2);  /* emit vs. omit in/out test */
   }

   shader->current = variant;

   /* TODO: put this in the variant */
   /* TODO: most of these can be relaxed, in particular the colormask */
   opaque = !key.blend.logicop_enable &&
            !key.blend.rt[0].blend_enable &&
            key.blend.rt[0].colormask == 0xf &&
            !key.alpha.enabled &&
            !key.depth.enabled &&
            !key.scissor &&
            !shader->info.uses_kill
            ? TRUE : FALSE;

   lp_setup_set_fs_functions(lp->setup, 
                             shader->current->jit_function[0],
                             shader->current->jit_function[1],
                             opaque);
}
Example #10
int
spiflash_wait_ready(struct spiflash_dev *dev, uint32_t timeout_ms)
{
    uint32_t ticks;
    os_time_t exp_time;
    os_time_ms_to_ticks(timeout_ms, &ticks);
    exp_time = os_time_get() + ticks;

    while (!spiflash_device_ready(dev)) {
        if (os_time_get() > exp_time) {
            return -1;
        }
    }
    return 0;
}
static int
sim_accel_sensor_read(struct sensor *sensor, sensor_type_t type,
        sensor_data_func_t data_func, void *data_arg, uint32_t timeout)
{
    struct sim_accel *sa;
    struct sensor_accel_data sad;
    os_time_t now;
    uint32_t num_samples;
    int i;
    int rc;

    /* If the read isn't looking for accel data, then don't do anything. */
    if (!(type & SENSOR_TYPE_ACCELEROMETER)) {
        rc = SYS_EINVAL;
        goto err;
    }

    sa = (struct sim_accel *) SENSOR_GET_DEVICE(sensor);

    /* When a sensor is "read", we get the last 'n' samples from the device
     * and pass them to the sensor data function.  Based on the sample
     * interval provided to sim_accel_config() and the last time this function
     * was called, 'n' samples are generated.
     */
    now = os_time_get();

    num_samples = (now - sa->sa_last_read_time) / sa->sa_cfg.sac_sample_itvl;
    num_samples = min(num_samples, sa->sa_cfg.sac_nr_samples);

    /* By default, readings are provided only for one axis (x); if more
     * axes are configured, up to three axes of data can be returned.
     */
    sad.sad_x = 0.0;
    sad.sad_y = 0.0;
    sad.sad_z = 0.0;

    sad.sad_x_is_valid = 1;
    sad.sad_y_is_valid = 0;
    sad.sad_z_is_valid = 0;

    if (sa->sa_cfg.sac_nr_axises > 1) {
        sad.sad_y = 0.0;
    }
    if (sa->sa_cfg.sac_nr_axises > 2) {
        sad.sad_z = 0.0;
    }

    /* Call data function for each of the generated readings. */
    for (i = 0; i < num_samples; i++) {
        rc = data_func(sensor, data_arg, &sad, SENSOR_TYPE_ACCELEROMETER);
        if (rc != 0) {
            goto err;
        }
    }

    return (0);
err:
    return (rc);
}
Example #12
static void
query_cpu_load(struct hud_graph *gr)
{
   struct cpu_info *info = gr->query_data;
   uint64_t now = os_time_get();

   if (info->last_time) {
      if (info->last_time + gr->pane->period <= now) {
         uint64_t cpu_busy, cpu_total, cpu_load;

         get_cpu_stats(info->cpu_index, &cpu_busy, &cpu_total);

         cpu_load = (cpu_busy - info->last_cpu_busy) * 100 /
                    (double)(cpu_total - info->last_cpu_total);
         hud_graph_add_value(gr, cpu_load);

         info->last_cpu_busy = cpu_busy;
         info->last_cpu_total = cpu_total;
         info->last_time = now;
      }
   }
   else {
      /* initialize */
      info->last_time = now;
      get_cpu_stats(info->cpu_index, &info->last_cpu_busy,
                    &info->last_cpu_total);
   }
}
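query_cpu_load() samples the CPU counters at most once per pane period by comparing the stored last_time against the current clock. The same rate-limiting idiom in isolation is sketched below; struct sampler and the fn() callback are illustrative stand-ins, not names from the HUD code.

#include <stdint.h>

extern int64_t os_time_get(void);   /* assumed: microseconds */

struct sampler {
    int64_t last_time;              /* 0 until the first call */
    int64_t period_us;              /* minimum interval between samples */
};

/* Call as often as you like; fn() runs at most once per period_us. */
static void
sampler_poll(struct sampler *s, void (*fn)(void))
{
    int64_t now = os_time_get();

    if (s->last_time == 0 || s->last_time + s->period_us <= now) {
        fn();
        s->last_time = now;
    }
}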
Example #13
/** 
 *  @brief This function computes the RSSI in received packet.
 *  
 *  @param priv    A pointer to wlan_private structure
 *  @param pRxPD   A pointer to RxPD structure of received packet
 *  @return 	   n/a
 */
static void
wlan_compute_rssi(wlan_private * priv, RxPD * pRxPD)
{
    wlan_adapter *Adapter = priv->adapter;

    ENTER();

    PRINTM(INFO, "RxPD: SNR = %d, NF = %d\n", pRxPD->SNR, pRxPD->NF);

    Adapter->SNR[TYPE_RXPD][TYPE_NOAVG] = pRxPD->SNR;
    Adapter->NF[TYPE_RXPD][TYPE_NOAVG] = pRxPD->NF;
    wlan_save_rawSNRNF(priv, pRxPD);

    Adapter->RxPDAge = os_time_get();
    Adapter->RxPDRate = pRxPD->RxRate;

    Adapter->SNR[TYPE_RXPD][TYPE_AVG] = wlan_getAvgSNR(priv) * AVG_SCALE;
    Adapter->NF[TYPE_RXPD][TYPE_AVG] = wlan_getAvgNF(priv) * AVG_SCALE;
    PRINTM(INFO, "SNR-avg = %d, NF-avg = %d\n",
           Adapter->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE,
           Adapter->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE);

    Adapter->RSSI[TYPE_RXPD][TYPE_NOAVG] =
        CAL_RSSI(Adapter->SNR[TYPE_RXPD][TYPE_NOAVG],
                 Adapter->NF[TYPE_RXPD][TYPE_NOAVG]);

    Adapter->RSSI[TYPE_RXPD][TYPE_AVG] =
        CAL_RSSI(Adapter->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE,
                 Adapter->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE);
    wlan_check_subscribe_event(priv);
    LEAVE();
}
Example #14
static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);   
   struct pb_cache_manager *mgr = buf->mgr;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&buf->base.reference));
   
   _pb_cache_buffer_list_check_free(mgr);

   /* Directly release any buffer that exceeds the limit. */
   if (mgr->cache_size + buf->base.size > mgr->max_cache_size) {
      pb_reference(&buf->buffer, NULL);
      FREE(buf);
      pipe_mutex_unlock(mgr->mutex);
      return;
   }

   buf->start = os_time_get();
   buf->end = buf->start + mgr->usecs;
   LIST_ADDTAIL(&buf->head, &mgr->delayed);
   ++mgr->numDelayed;
   mgr->cache_size += buf->base.size;
   pipe_mutex_unlock(mgr->mutex);
}
Example #15
static void
softpipe_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct softpipe_context *softpipe = softpipe_context( pipe );
   struct softpipe_query *sq = softpipe_query(q);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      sq->start = softpipe->occlusion_count;
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
   case PIPE_QUERY_TIME_ELAPSED:
      sq->start = 1000*os_time_get();
      break;
   case PIPE_QUERY_SO_STATISTICS:
      sq->so.primitives_storage_needed = 0;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      sq->so.num_primitives_written = 0;
      softpipe->so_stats.num_primitives_written = 0;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      sq->num_primitives_generated = 0;
      softpipe->num_primitives_generated = 0;
      break;
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_GPU_FINISHED:
      break;
   default:
      assert(0);
      break;
   }
   softpipe->active_query_count++;
   softpipe->dirty |= SP_NEW_QUERY;
}
Example #16
/**
 * Add a buffer to the cache. This is typically done when the buffer is
 * being released.
 */
void
pb_cache_add_buffer(struct pb_cache_entry *entry)
{
   struct pb_cache *mgr = entry->mgr;
   struct list_head *cache = &mgr->buckets[entry->bucket_index];
   struct pb_buffer *buf = entry->buffer;
   unsigned i;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&buf->reference));

   for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
      release_expired_buffers_locked(&mgr->buckets[i]);

   /* Directly release any buffer that exceeds the limit. */
   if (mgr->cache_size + buf->size > mgr->max_cache_size) {
      mgr->destroy_buffer(buf);
      pipe_mutex_unlock(mgr->mutex);
      return;
   }

   entry->start = os_time_get();
   entry->end = entry->start + mgr->usecs;
   LIST_ADDTAIL(&entry->head, cache);
   ++mgr->num_buffers;
   mgr->cache_size += buf->size;
   pipe_mutex_unlock(mgr->mutex);
}
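pb_cache_add_buffer() stamps each entry with start = os_time_get() and end = start + mgr->usecs; release_expired_buffers_locked(), called above but not shown here, presumably frees the entries whose end time has already passed. A hedged sketch of such an expiry walk over a simplified singly linked list (not the real pb_cache structures) follows; since entries are appended in insertion order, their expiry times are non-decreasing and the walk can stop at the first live entry.

#include <stdint.h>

extern int64_t os_time_get(void);   /* assumed: microseconds */

/* Simplified stand-in for struct pb_cache_entry. */
struct cache_entry {
    struct cache_entry *next;
    int64_t end;                    /* expiry timestamp set on insertion */
};

/* Free every entry whose expiry time has passed and return the new head.
 * destroy() is a hypothetical callback standing in for mgr->destroy_buffer(). */
static struct cache_entry *
release_expired(struct cache_entry *head,
                void (*destroy)(struct cache_entry *))
{
    int64_t now = os_time_get();

    while (head && head->end <= now) {
        struct cache_entry *expired = head;
        head = head->next;
        destroy(expired);
    }
    return head;
}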
Example #17
/** 
 *  @brief Exit deep sleep with timeout
 *   
 *  @param priv		A pointer to wlan_private structure
 *  @return    		WLAN_STATUS_SUCCESS or WLAN_STATUS_FAILURE
 */
int
wlan_exit_deep_sleep_timeout(wlan_private * priv)
{
    wlan_adapter *Adapter = priv->adapter;
    int ret = WLAN_STATUS_SUCCESS;

    ENTER();

    Adapter->IsAutoDeepSleepEnabled = FALSE;
    if (Adapter->IsDeepSleep == TRUE) {
        PRINTM(CMND, "Exit deep sleep... @ %lu\n", os_time_get());
        sbi_exit_deep_sleep(priv);
        if (Adapter->IsDeepSleep == TRUE) {
            if (os_wait_interruptible_timeout(Adapter->ds_awake_q,
                                              !Adapter->IsDeepSleep,
                                              MRVDRV_DEEP_SLEEP_EXIT_TIMEOUT) ==
                0) {
                PRINTM(MSG, "ds_awake_q: timer expired\n");
                ret = WLAN_STATUS_FAILURE;
            }
        }
    }

    LEAVE();
    return ret;
}
static void
fd_sw_end_query(struct fd_context *ctx, struct fd_query *q)
{
	struct fd_sw_query *sq = fd_sw_query(q);
	sq->end_value = read_counter(ctx, q->type);
	if (is_rate_query(q))
		sq->end_time = os_time_get();
}
Example #19
static uint64_t
nouveau_screen_get_timestamp(struct pipe_screen *pscreen)
{
	int64_t cpu_time = os_time_get() * 1000;

	/* getparam of PTIMER_TIME takes about x10 as long (several usecs) */

	return cpu_time + nouveau_screen(pscreen)->cpu_gpu_time_delta;
}
static boolean
fd_sw_begin_query(struct fd_context *ctx, struct fd_query *q)
{
	struct fd_sw_query *sq = fd_sw_query(q);
	sq->begin_value = read_counter(ctx, q->type);
	if (is_rate_query(q))
		sq->begin_time = os_time_get();
	return true;
}
Example #21
/**
 * Main bletest function. Called by the task timer every 50 msecs.
 * 
 */
void
bletest_execute(void)
{
    /* Toggle LED at set rate */
    if ((int32_t)(os_time_get() - g_bletest_next_led_time) >= 0) {
        gpio_toggle(LED_BLINK_PIN);
        g_bletest_next_led_time = os_time_get() + g_bletest_led_rate;
    }
#if (BLETEST_CFG_ROLE == BLETEST_ROLE_ADVERTISER)
    bletest_execute_advertiser();
#endif
#if (BLETEST_CFG_ROLE == BLETEST_ROLE_SCANNER)
    bletest_execute_scanner();
#endif
#if (BLETEST_CFG_ROLE == BLETEST_ROLE_INITIATOR)
    bletest_execute_initiator();
#endif
}
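bletest_execute() tests its deadline with (int32_t)(os_time_get() - g_bletest_next_led_time) >= 0 rather than a plain >= comparison; subtracting first and reading the difference as signed keeps the test correct even after the 32-bit tick counter wraps around. A small sketch of that idiom is below; led_poll(), toggle_led() and the next_toggle variable are hypothetical names, and os_time_get() is assumed to return a wrapping 32-bit tick count as in the Mynewt examples.

#include <stdint.h>

extern uint32_t os_time_get(void);  /* assumed: wrapping 32-bit tick counter */
extern void toggle_led(void);       /* hypothetical */

static uint32_t next_toggle;        /* deadline, in ticks */

/* Call periodically: toggles the LED every `rate` ticks.  The signed view of
 * the tick difference stays small, so the test survives counter wraparound. */
static void
led_poll(uint32_t rate)
{
    if ((int32_t)(os_time_get() - next_toggle) >= 0) {
        toggle_led();
        next_toggle = os_time_get() + rate;
    }
}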
Example #22
u32_t
sys_arch_sem_wait(sys_sem_t *sem, u32_t timo)
{
    u32_t now;

    if (timo == 0) {
        timo = OS_WAIT_FOREVER;
    } else {
        if (os_time_ms_to_ticks(timo, &timo)) {
            timo = OS_WAIT_FOREVER - 1;
        }
    }
    now = os_time_get();
    if (os_sem_pend(sem, timo) == OS_TIMEOUT) {
        return SYS_ARCH_TIMEOUT;
    }
    return (os_time_get() - now) * 1000 / OS_TICKS_PER_SEC;
}
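sys_arch_sem_wait() converts the elapsed tick count back to milliseconds with * 1000 / OS_TICKS_PER_SEC. The same conversion in isolation, widened to 64 bits before the multiply so that long waits cannot overflow (a precaution the port itself does not take), is sketched below; elapsed_ms() is an illustrative helper, not part of the port.

#include <stdint.h>

extern uint32_t os_time_get(void);  /* assumed: tick counter */

/* Milliseconds elapsed since `start`, a tick value captured earlier. */
static uint32_t
elapsed_ms(uint32_t start, uint32_t ticks_per_sec)
{
    uint32_t elapsed_ticks = os_time_get() - start;

    return (uint32_t)((uint64_t)elapsed_ticks * 1000 / ticks_per_sec);
}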
Example #23
static void
timed_buffer_unmap(struct pipe_winsys *winsys,
                       struct pipe_buffer *buf)
{
   struct pipe_winsys *backend = timed_winsys(winsys)->backend;
   int64_t start = os_time_get();

   backend->buffer_unmap( backend, buf );

   time_finish(winsys, start, 3, __FUNCTION__);
}
Example #24
static void
timed_buffer_destroy(struct pipe_buffer *buf)
{
   struct pipe_winsys *winsys = buf->screen->winsys;
   struct pipe_winsys *backend = timed_winsys(winsys)->backend;
   int64_t start = os_time_get();

   backend->buffer_destroy( buf );

   time_finish(winsys, start, 4, __FUNCTION__);
}
static void
fd_sw_end_query(struct fd_context *ctx, struct fd_query *q)
{
	struct fd_sw_query *sq = fd_sw_query(q);
	sq->end_value = read_counter(ctx, q->type);
	if (is_time_rate_query(q)) {
		sq->end_time = os_time_get();
	} else if (is_draw_rate_query(q)) {
		sq->end_time = ctx->stats.draw_calls;
	}
}
static boolean
fd_sw_begin_query(struct fd_context *ctx, struct fd_query *q)
{
	struct fd_sw_query *sq = fd_sw_query(q);
	sq->begin_value = read_counter(ctx, q->type);
	if (is_time_rate_query(q)) {
		sq->begin_time = os_time_get();
	} else if (is_draw_rate_query(q)) {
		sq->begin_time = ctx->stats.draw_calls;
	}
	return true;
}
Example #27
static void
timed_flush_frontbuffer( struct pipe_winsys *winsys,
                         struct pipe_surface *surf,
                         void *context_private)
{
   struct pipe_winsys *backend = timed_winsys(winsys)->backend;
   int64_t start = os_time_get();

   backend->flush_frontbuffer( backend, surf, context_private );

   time_finish(winsys, start, 5, __FUNCTION__);
}
Example #28
static const char *
timed_get_name( struct pipe_winsys *winsys )
{
   struct pipe_winsys *backend = timed_winsys(winsys)->backend;
   int64_t start = os_time_get();

   const char *ret = backend->get_name( backend );

   time_finish(winsys, start, 9, __FUNCTION__);
   
   return ret;
}
Example #29
static void
timed_fence_reference(struct pipe_winsys *winsys,
                    struct pipe_fence_handle **ptr,
                    struct pipe_fence_handle *fence)
{
   struct pipe_winsys *backend = timed_winsys(winsys)->backend;
   int64_t start = os_time_get();

   backend->fence_reference( backend, ptr, fence );

   time_finish(winsys, start, 10, __FUNCTION__);
}
Example #30
u32_t
sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, uint32_t timo)
{
    u32_t now;
    void *val;

    if (timo == 0) {
        timo = OS_WAIT_FOREVER;
    } else {
        if (os_time_ms_to_ticks(timo, &timo)) {
            timo = OS_WAIT_FOREVER - 1;
        }
    }
    now = os_time_get();
    if (os_queue_get(mbox, &val, timo)) {
        return SYS_ARCH_TIMEOUT;
    }
    if (msg != NULL) {
        *msg = val;
    }
    return (os_time_get() - now) * 1000 / OS_TICKS_PER_SEC;
}