Example #1
static void test1(void) {
  printf("***************************\n"
	 "*        TEST 1           *\n"
	 "***************************\n");

  init_cache(&cache);
  printf("\n--- Initial cache ---\n");
  print_all_cache(stdout, &cache);

  test_elem(A, 1, 1);
  test_elem(A, 1, 1);

  printf("\n--- After addition ---\n");
  print_all_cache(stdout, &cache);

  test_elem(B, 2, 2);
  test_elem(B, 2, 2);

  printf("\n--- After more addition ---\n");
  print_all_cache(stdout, &cache);

  cache_push(&cache);
  cache_push(&cache);
  printf("\n--- Push: level 2 ---\n");
  print_all_cache(stdout, &cache);

  test_elem(A, 0, 1);
  test_elem(A, 0, 1);

  test_elem(A, 1, 0);
  test_elem(A, 1, 0);

  printf("\n--- Content ---\n");
  print_all_cache(stdout, &cache);

  cache_push(&cache);
  printf("\n--- Push: level 3 ---\n");
  print_all_cache(stdout, &cache);

  cache_pop(&cache);
  printf("\n--- Pop: level 2 ---\n");
  print_all_cache(stdout, &cache);

  cache_pop(&cache);
  printf("\n--- Pop: level 1 ---\n");
  print_all_cache(stdout, &cache);

  cache_pop(&cache);
  printf("\n--- Pop: level 0 ---\n");
  print_all_cache(stdout, &cache);

  delete_cache(&cache);
}
Example #2
/** 
 * @brief  Compute a set of Gaussians with heuristic pruning.
 *
 * If the N-best mixtures of the previous frame are specified in @a last_id,
 * they are first computed to get the maximum value for each dimension.
 * After that, the rest of the Gaussians are computed using those maximum
 * values as heuristics for the not-yet-computed dimensions, so that
 * unpromising Gaussians can be dropped at an early stage of the likelihood
 * computation.  If @a last_id is not specified (typically at the first frame
 * of the input), the same safe pruning as in gprune_safe.c is applied.
 *
 * The calculated scores are stored in OP_calced_score, with the
 * corresponding mixture ids in OP_calced_id; both are filled by calling
 * cache_push().
 * The number of calculated mixtures is stored in OP_calced_num.
 * 
 * This can be called from calc_tied_mix() or calc_mix().
 * 
 * @param wrk [i/o] HMM computation work area
 * @param g [in] set of Gaussian densities to compute the output probability
 * @param gnum [in] length of above
 * @param last_id [in] ID list of the N-best mixtures in the previous input
 * frame, or NULL if not available
 * @param lnum [in] length of last_id
 */
void
gprune_heu(HMMWork *wrk, HTK_HMM_Dens **g, int gnum, int *last_id, int lnum)
{
  int i, j, num = 0;
  LOGPROB score, thres;

  if (last_id != NULL) {	/* compute them first to form thresholds */
    /* 1. clear backmax */
    init_backmax(wrk);
    /* 2. calculate first $OP_gprune_num with setting max for each dimension */
    for (j=0; j<lnum; j++) {
      i = last_id[j];
      score = compute_g_heu_updating(wrk, g[i]);
      num = cache_push(wrk, i, score, num);
      wrk->mixcalced[i] = TRUE;      /* mark them as calculated */
    }
    /* 3. set backmax for each dimension */
    make_backmax(wrk);
    /* 4. calculate the rest with pruning*/
    thres = wrk->OP_calced_score[num-1];
    for (i = 0; i < gnum; i++) {
      /* skip calced ones in 1. */
      if (wrk->mixcalced[i]) {
        wrk->mixcalced[i] = FALSE;
        continue;
      }
      /* compute with safe pruning */
      score = compute_g_heu_pruning(wrk, g[i], thres);
      if (score > LOG_ZERO) {
	num = cache_push(wrk, i, score, num);
	thres = wrk->OP_calced_score[num-1];
      }
    }
  } else {			/* in case the last_id not available */
    /* at the first 0 frame */
    /* calculate with safe pruning */
    thres = LOG_ZERO;
    for (i = 0; i < gnum; i++) {
      if (num < wrk->OP_gprune_num) {
	score = compute_g_base(wrk, g[i]);
      } else {
	score = compute_g_safe(wrk, g[i], thres);
	if (score <= thres) continue;
      }
      num = cache_push(wrk, i, score, num);
      thres = wrk->OP_calced_score[num-1];
    }
  }
  wrk->OP_calced_num = num;
}
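
The three pruning routines collected here (gprune_heu above, plus gprune_safe and gprune_beam later in this listing) all hand their results to cache_push(). The following is a minimal sketch of the contract they appear to rely on, assuming only what the doc comment states: the (id, score) pair is inserted into OP_calced_score / OP_calced_id so that scores stay sorted in descending order, at most OP_gprune_num entries are kept, and the new count is returned. The helper name cache_push_sketch and its internals are illustrative, not the actual implementation.

/*
 * Hedged sketch of the cache_push() behaviour assumed by the gprune_*
 * routines; it reuses HMMWork, LOGPROB and the OP_* fields from the listing.
 */
static int cache_push_sketch(HMMWork *wrk, int id, LOGPROB score, int len)
{
  int p;

  /* already full and worse than the current N-th best: nothing to do */
  if (len >= wrk->OP_gprune_num && score <= wrk->OP_calced_score[len - 1])
    return len;

  /* room left: the list grows by one entry */
  if (len < wrk->OP_gprune_num)
    len++;

  /* shift worse entries down (dropping the overflow) and insert */
  for (p = len - 1; p > 0 && wrk->OP_calced_score[p - 1] < score; p--) {
    wrk->OP_calced_score[p] = wrk->OP_calced_score[p - 1];
    wrk->OP_calced_id[p]    = wrk->OP_calced_id[p - 1];
  }
  wrk->OP_calced_score[p] = score;
  wrk->OP_calced_id[p]    = id;

  return len;
}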
Example #3
static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
{
    unsigned char flags = 0x01;
    unsigned long addr = virt_to_bus(cmd->SCp.ptr);

    /* setup dma direction */
    if (!dir_in)
	flags |= 0x04;

    /* remember direction */
    HDATA(mvme147_host)->dma_dir = dir_in;

    if (dir_in)
  	/* invalidate any cache */
	cache_clear (addr, cmd->SCp.this_residual);
    else
	/* push any dirty cache */
	cache_push (addr, cmd->SCp.this_residual);

    /* start DMA */
    m147_pcc->dma_bcr   = cmd->SCp.this_residual | (1<<24);
    m147_pcc->dma_dadr  = addr;
    m147_pcc->dma_cntrl = flags;

    /* return success */
    return 0;
}
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct cyber_dma_registers *dregs = 
		(struct cyber_dma_registers *) esp->dregs;

	cache_push(addr, length);

	addr |= 1;
	dregs->dma_addr0 = (addr >> 24) & 0xff;
	dregs->dma_addr1 = (addr >> 16) & 0xff;
	dregs->dma_addr2 = (addr >>  8) & 0xff;
	dregs->dma_addr3 = (addr      ) & 0xff;
	ctrl_data |= CYBER_DMA_WRITE;

	/* See comment above */
#if 0
	if((addr & 0x3fc) || length & 0x3ff || ((addr > 0x200000) &&
						(addr < 0xff0000)))
		ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */
	else
		ctrl_data |= CYBER_DMA_Z3; /* CHIP/Z3, do 32 bit DMA */
#else
	ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */
#endif
	dregs->ctrl_reg = ctrl_data;
}
Example #5
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
	struct Scsi_Host *instance = cmd->device->host;
	struct WD33C93_hostdata *hdata = shost_priv(instance);
	unsigned char flags = 0x01;
	unsigned long addr = virt_to_bus(cmd->SCp.ptr);

	/* setup dma direction */
	if (!dir_in)
		flags |= 0x04;

	/* remember direction */
	hdata->dma_dir = dir_in;

	if (dir_in) {
		/* invalidate any cache */
		cache_clear(addr, cmd->SCp.this_residual);
	} else {
		/* push any dirty cache */
		cache_push(addr, cmd->SCp.this_residual);
	}

	/* start DMA */
	m147_pcc->dma_bcr = cmd->SCp.this_residual | (1 << 24);
	m147_pcc->dma_dadr = addr;
	m147_pcc->dma_cntrl = flags;

	/* return success */
	return 0;
}
Example #6
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
    unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
    unsigned long addr = virt_to_bus(cmd->SCp.ptr);

    /*
     * if the physical address has the wrong alignment, or if
     * physical address is bad, or if it is a write and at the
     * end of a physical memory chunk, then allocate a bounce
     * buffer
     */
    if (addr & A3000_XFER_MASK)
    {
	HDATA(a3000_host)->dma_bounce_len = (cmd->SCp.this_residual + 511)
	    & ~0x1ff;
	HDATA(a3000_host)->dma_bounce_buffer =
	    kmalloc (HDATA(a3000_host)->dma_bounce_len, GFP_KERNEL);
	
	/* can't allocate memory; use PIO */
	if (!HDATA(a3000_host)->dma_bounce_buffer) {
	    HDATA(a3000_host)->dma_bounce_len = 0;
	    return 1;
	}

	if (!dir_in) {
	    /* copy to bounce buffer for a write */
	    memcpy (HDATA(a3000_host)->dma_bounce_buffer,
		cmd->SCp.ptr, cmd->SCp.this_residual);
	}

	addr = virt_to_bus(HDATA(a3000_host)->dma_bounce_buffer);
    }

    /* setup dma direction */
    if (!dir_in)
	cntr |= CNTR_DDIR;

    /* remember direction */
    HDATA(a3000_host)->dma_dir = dir_in;

    DMA(a3000_host)->CNTR = cntr;

    /* setup DMA *physical* address */
    DMA(a3000_host)->ACR = addr;

    if (dir_in)
  	/* invalidate any cache */
	cache_clear (addr, cmd->SCp.this_residual);
    else
	/* push any dirty cache */
	cache_push (addr, cmd->SCp.this_residual);

    /* start DMA */
    mb();			/* make sure setup is completed */
    DMA(a3000_host)->ST_DMA = 1;
    mb();			/* make sure DMA has started before next IO */

    /* return success */
    return 0;
}
Example #7
void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
#if MKIV
	struct blz1230_dma_registers *dregs = 
		(struct blz1230_dma_registers *) (esp->dregs);
#else
	struct blz1230II_dma_registers *dregs = 
		(struct blz1230II_dma_registers *) (esp->dregs);
#endif

	cache_push(addr, length);

	addr >>= 1;
	addr |= BLZ1230_DMA_WRITE;

	/* First set latch */
	dregs->dma_latch = (addr >> 24) & 0xff;

	/* Then pump the address to the DMA address register */
#if MKIV
	dregs->dma_addr = (addr >> 24) & 0xff;
#endif
	dregs->dma_addr = (addr >> 16) & 0xff;
	dregs->dma_addr = (addr >>  8) & 0xff;
	dregs->dma_addr = (addr      ) & 0xff;
}
Example #8
/** 
 * @brief  Compute a set of Gaussians with safe pruning.
 *
 * If the N-best mixtures of the previous frame are specified in @a last_id,
 * they are first computed to set the initial threshold.
 * After that, the rest of the Gaussians are computed against that threshold,
 * so that unpromising Gaussians can be dropped at an early stage of the
 * likelihood computation.  Whenever the computation of a Gaussian reaches
 * the end, the threshold is updated so that it always holds the likelihood
 * of the current N-best score.
 *
 * The calculated scores are stored in OP_calced_score, with the
 * corresponding mixture ids in OP_calced_id; both are filled by calling
 * cache_push().
 * The number of calculated mixtures is stored in OP_calced_num.
 * 
 * This can be called from calc_tied_mix() or calc_mix().
 * 
 * @param wrk [i/o] HMM computation work area
 * @param g [in] set of Gaussian densities to compute the output probability
 * @param gnum [in] length of above
 * @param last_id [in] ID list of the N-best mixtures in the previous input
 * frame, or NULL if not available
 * @param lnum [in] length of last_id
 */
void
gprune_safe(HMMWork *wrk, HTK_HMM_Dens **g, int gnum, int *last_id, int lnum)
{
  int i, j, num = 0;
  LOGPROB score, thres;

  if (last_id != NULL) {	/* compute them first to form threshold */
    /* 1. calculate first $OP_gprune_num and set initial threshold */
    for (j=0; j<lnum; j++) {
      i = last_id[j];
      score = compute_g_base(wrk, g[i]);
      num = cache_push(wrk, i, score, num);
      wrk->mixcalced[i] = TRUE;      /* mark them as calculated */
    }
    thres = wrk->OP_calced_score[num-1];
    /* 2. calculate the rest with pruning*/
    for (i = 0; i < gnum; i++) {
      /* skip calced ones in 1. */
      if (wrk->mixcalced[i]) {
        wrk->mixcalced[i] = FALSE;
        continue;
      }
      /* compute with safe pruning */
      score = compute_g_safe(wrk, g[i], thres);
      if (score <= thres) continue;
      num = cache_push(wrk, i, score, num);
      thres = wrk->OP_calced_score[num-1];
    }
  } else {			/* in case the last_id not available */
    /* not tied-mixture, or at the first 0 frame */
    thres = LOG_ZERO;
    for (i = 0; i < gnum; i++) {
      if (num < wrk->OP_gprune_num) {
	score = compute_g_base(wrk, g[i]);
      } else {
	score = compute_g_safe(wrk, g[i], thres);
	if (score <= thres) continue;
      }
      num = cache_push(wrk, i, score, num);
      thres = wrk->OP_calced_score[num-1];
    }
  }
  wrk->OP_calced_num = num;
}
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct cyberII_dma_registers *dregs = 
		(struct cyberII_dma_registers *) esp->dregs;

	cache_push(addr, length);

	addr |= 1;
	dregs->dma_addr0 = (addr >> 24) & 0xff;
	dregs->dma_addr1 = (addr >> 16) & 0xff;
	dregs->dma_addr2 = (addr >>  8) & 0xff;
	dregs->dma_addr3 = (addr      ) & 0xff;
}
Example #10
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct blz2060_dma_registers *dregs = 
		(struct blz2060_dma_registers *) (esp->dregs);

	cache_push(addr, length);

	addr >>= 1;
	addr |= BLZ2060_DMA_WRITE;
	dregs->dma_addr3 = (addr      ) & 0xff;
	dregs->dma_addr2 = (addr >>  8) & 0xff;
	dregs->dma_addr1 = (addr >> 16) & 0xff;
	dregs->dma_addr0 = (addr >> 24) & 0xff;
}
Example #11
void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
				size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		if (printk_ratelimit())
			printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
		break;
	}
}
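
Example #11 is the m68k architecture hook behind the generic streaming-DMA API: dirty lines are pushed with cache_push() before the device reads memory, and invalidated with cache_clear() before the device writes it. A hedged sketch of how a driver normally reaches it through that API follows; the function name start_tx, the buffer and the surrounding driver are illustrative, only the dma_* calls are the standard kernel interface.

#include <linux/dma-mapping.h>

static void start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* set up a streaming mapping for a CPU-to-device transfer */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return;

	/* hand the buffer back to the device after the CPU touched it again;
	 * with DMA_TO_DEVICE this is the cache_push() case shown above */
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);

	/* ... program the device and start the transfer here ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}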
Example #12
unsigned long long database_push(char *data, size_t len)
{
	struct lnode *n = calloc(1, sizeof(struct lnode));
	struct file_entry *fe = calloc(1, sizeof(struct file_entry));

	pthread_mutex_lock(&lock);

	fe->len = len;
	fe->id = next_id++;
	n->data = fe;
	file_list = lnode_push(file_list, n);

	pthread_mutex_unlock(&lock);

	database_write(fe, data);

	cache_push(data, len, fe->id);

	return fe->id;
}
Example #13
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct fastlane_dma_registers *dregs = 
		(struct fastlane_dma_registers *) (esp->dregs);
	unsigned long *t;

	cache_push(addr, length);

	dma_clear(esp);

	t = (unsigned long *)((addr & 0x00ffffff) + (esp->edev));

	dregs->clear_strobe = 0;
	*t = addr;

	ctrl_data = ((ctrl_data & FASTLANE_DMA_MASK) | 
		     FASTLANE_DMA_ENABLE |
		     FASTLANE_DMA_WRITE);
	dregs->ctrl_reg = ctrl_data;
}
Example #14
size_t database_getfile(char *name, char **datap)
{
	struct file_entry *entry = 0;
	char *err = 0;
	unsigned long long id = strtoull(name, &err, 16);
	if ((!id && errno == EINVAL) || (err && isalpha(*err))){
		errno = 0;
		return 0;
	}

	//Check the cache first.
	struct cache_entry *ce = cache_get(id);
	if (ce){
		if (datap)
			*datap = ce->data;
		return ce->len;
	}

	struct lnode *n = database_get(id);

	if (n){
		entry = n->data;
		char *data = database_read(entry);

		if (!data)
			return 0;

		if (datap)
			*datap = data;

		//Put back in cache.
		if (!ce)
			cache_push(data, entry->len, entry->id);

		return entry->len;
	}

	return 0;
}
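
database_push() and database_getfile() form a small cache-aside pair: push assigns an id, writes the entry to disk and seeds the cache via cache_push(); getfile parses the id from a hex string, tries cache_get() first and falls back to the database, re-caching what it reads. A hedged round-trip sketch follows; it assumes only what the two listings show (push returns a numeric id, getfile takes that id formatted in hex), and the payload and the function name store_and_reload are illustrative.

#include <stdio.h>
#include <string.h>

static int store_and_reload(void)
{
	char payload[] = "hello, cache";
	char name[32];
	char *data = NULL;

	unsigned long long id = database_push(payload, sizeof(payload));

	/* database_getfile() parses the name with strtoull(..., 16), so format it as hex */
	snprintf(name, sizeof(name), "%llx", id);

	size_t len = database_getfile(name, &data);
	if (len == 0) {
		fprintf(stderr, "lookup of %s failed\n", name);
		return -1;
	}

	/* the data should round-trip unchanged, whether it came from cache or disk */
	return (len == sizeof(payload) && memcmp(data, payload, len) == 0) ? 0 : -1;
}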
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
	struct Scsi_Host *instance = cmd->device->host;
	struct a3000_hostdata *hdata = shost_priv(instance);
	struct WD33C93_hostdata *wh = &hdata->wh;
	struct a3000_scsiregs *regs = hdata->regs;
	unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
	unsigned long addr = virt_to_bus(cmd->SCp.ptr);

	/*
	 * if the physical address has the wrong alignment, or if
	 * physical address is bad, or if it is a write and at the
	 * end of a physical memory chunk, then allocate a bounce
	 * buffer
	 */
	if (addr & A3000_XFER_MASK) {
		wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
		wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
						GFP_KERNEL);

		/* can't allocate memory; use PIO */
		if (!wh->dma_bounce_buffer) {
			wh->dma_bounce_len = 0;
			return 1;
		}

		if (!dir_in) {
			/* copy to bounce buffer for a write */
			memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
			       cmd->SCp.this_residual);
		}

		addr = virt_to_bus(wh->dma_bounce_buffer);
	}

	/* setup dma direction */
	if (!dir_in)
		cntr |= CNTR_DDIR;

	/* remember direction */
	wh->dma_dir = dir_in;

	regs->CNTR = cntr;

	/* setup DMA *physical* address */
	regs->ACR = addr;

	if (dir_in) {
		/* invalidate any cache */
		cache_clear(addr, cmd->SCp.this_residual);
	} else {
		/* push any dirty cache */
		cache_push(addr, cmd->SCp.this_residual);
	}

	/* start DMA */
	mb();			/* make sure setup is completed */
	regs->ST_DMA = 1;
	mb();			/* make sure DMA has started before next IO */

	/* return success */
	return 0;
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
	struct Scsi_Host *instance = cmd->device->host;
	struct a2091_hostdata *hdata = shost_priv(instance);
	struct WD33C93_hostdata *wh = &hdata->wh;
	struct a2091_scsiregs *regs = hdata->regs;
	unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
	unsigned long addr = virt_to_bus(cmd->SCp.ptr);

	
	/* don't allow DMA if the physical address is bad */
	if (addr & A2091_XFER_MASK) {
		wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
		wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
						GFP_KERNEL);

		/* can't allocate memory; use PIO */
		if (!wh->dma_bounce_buffer) {
			wh->dma_bounce_len = 0;
			return 1;
		}

		/* get the physical address of the bounce buffer */
		addr = virt_to_bus(wh->dma_bounce_buffer);

		/* the bounce buffer may not be in the first 16M of physmem */
		if (addr & A2091_XFER_MASK) {
			/* we could use chipmem... maybe later */
			kfree(wh->dma_bounce_buffer);
			wh->dma_bounce_buffer = NULL;
			wh->dma_bounce_len = 0;
			return 1;
		}

		if (!dir_in) {
			/* copy to bounce buffer for a write */
			memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
			       cmd->SCp.this_residual);
		}
	}

	/* setup dma direction */
	if (!dir_in)
		cntr |= CNTR_DDIR;

	/* remember direction */
	wh->dma_dir = dir_in;

	regs->CNTR = cntr;

	/* setup DMA *physical* address */
	regs->ACR = addr;

	if (dir_in) {
		/* invalidate any cache */
		cache_clear(addr, cmd->SCp.this_residual);
	} else {
		/* push any dirty cache */
		cache_push(addr, cmd->SCp.this_residual);
	}

	/* start DMA */
	regs->ST_DMA = 1;

	/* return success */
	return 0;
}
Example #17
static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
{
    unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
    unsigned long addr = VTOP(cmd->SCp.ptr);

    /*
     * if the physical address has the wrong alignment, or if
     * physical address is bad, or if it is a write and at the
     * end of a physical memory chunk, then allocate a bounce
     * buffer
     */
    if (addr & A3000_XFER_MASK ||
	(!dir_in && mm_end_of_chunk (addr, cmd->SCp.this_residual)))
    {
	HDATA(a3000_host)->dma_bounce_len = (cmd->SCp.this_residual + 511)
	    & ~0x1ff;
	HDATA(a3000_host)->dma_bounce_buffer =
	    scsi_malloc (HDATA(a3000_host)->dma_bounce_len);
	
	/* can't allocate memory; use PIO */
	if (!HDATA(a3000_host)->dma_bounce_buffer) {
	    HDATA(a3000_host)->dma_bounce_len = 0;
	    return 1;
	}

	if (!dir_in) {
	    /* copy to bounce buffer for a write */
	    if (cmd->use_sg) {
		memcpy (HDATA(a3000_host)->dma_bounce_buffer,
			cmd->SCp.ptr, cmd->SCp.this_residual);
	    } else
		memcpy (HDATA(a3000_host)->dma_bounce_buffer,
			cmd->request_buffer, cmd->request_bufflen);
	}

	addr = VTOP(HDATA(a3000_host)->dma_bounce_buffer);
    }

    /* setup dma direction */
    if (!dir_in)
	cntr |= CNTR_DDIR;

    /* remember direction */
    HDATA(a3000_host)->dma_dir = dir_in;

    DMA(a3000_host)->CNTR = cntr;

    /* setup DMA *physical* address */
    DMA(a3000_host)->ACR = addr;

    if (dir_in)
  	/* invalidate any cache */
	cache_clear (addr, cmd->SCp.this_residual);
    else
	/* push any dirty cache */
	cache_push (addr, cmd->SCp.this_residual);

    /* start DMA */
    DMA(a3000_host)->ST_DMA = 1;

    /* return success */
    return 0;
}
Example #18
/** 
 * @brief  Compute a set of Gaussians with beam pruning.
 *
 * If the N-best mixtures of the previous frame are specified in @a last_id,
 * they are first computed to set the thresholds for each dimension.
 * After that, the rest of the Gaussians are computed against those
 * per-dimension thresholds, so that unpromising Gaussians can be dropped at
 * an early stage of the likelihood computation.  If @a last_id is not
 * specified (typically at the first frame of the input), the same safe
 * pruning as in gprune_safe.c is applied.
 *
 * The calculated scores are stored in OP_calced_score, with the
 * corresponding mixture ids in OP_calced_id; both are filled by calling
 * cache_push().
 * The number of calculated mixtures is stored in OP_calced_num.
 * 
 * This can be called from calc_tied_mix() or calc_mix().
 * 
 * @param wrk [i/o] HMM computation work area
 * @param g [in] set of Gaussian densities to compute the output probability
 * @param gnum [in] length of above
 * @param last_id [in] ID list of the N-best mixtures in the previous input
 * frame, or NULL if not available
 * @param lnum [in] length of last_id
 */
void
gprune_beam(HMMWork *wrk, HTK_HMM_Dens **g, int gnum, int *last_id, int lnum)
{
  int i, j, num = 0;
  LOGPROB score, thres;

  if (last_id != NULL) {	/* compute them first to form thresholds */
    /* 1. clear dimthres */
    clear_dimthres(wrk);
    /* 2. calculate first $OP_gprune_num and set initial thresholds */
    for (j=0; j<lnum; j++) {
      i = last_id[j];
#ifdef TEST2
      if (!g[i]) {
	score = LOG_ZERO;
      } else {
	score = compute_g_beam_updating(wrk, g[i]);
      }
      num = cache_push(wrk, i, score, num);
#else
      score = compute_g_beam_updating(wrk, g[i]);
      num = cache_push(wrk, i, score, num);
#endif
      wrk->mixcalced[i] = TRUE;      /* mark them as calculated */
    }
    /* 3. set pruning thresholds for each dimension */
    set_dimthres(wrk);

    /* 4. calculate the rest with pruning*/
    for (i = 0; i < gnum; i++) {
      /* skip calced ones in 1. */
      if (wrk->mixcalced[i]) {
        wrk->mixcalced[i] = FALSE;
        continue;
      }
#ifdef TEST2
      /* compute with safe pruning */
      if (!g[i]) continue;
      score = compute_g_beam_pruning(wrk, g[i]);
      if (score > LOG_ZERO) {
	num = cache_push(wrk, i, score, num);
      }
#else
      /* compute with safe pruning */
      score = compute_g_beam_pruning(wrk, g[i]);
      if (score > LOG_ZERO) {
	num = cache_push(wrk, i, score, num);
      }
#endif
    }
  } else {			/* in case the last_id not available */
    /* at the first 0 frame */
    /* calculate with safe pruning */
    thres = LOG_ZERO;
    for (i = 0; i < gnum; i++) {
      if (num < wrk->OP_gprune_num) {
	score = compute_g_base(wrk, g[i]);
      } else {
	score = compute_g_safe(wrk, g[i], thres);
	if (score <= thres) continue;
      }
      num = cache_push(wrk, i, score, num);
      thres = wrk->OP_calced_score[num-1];
    }
  }
  wrk->OP_calced_num = num;
}
Example #19
static void test2(void) {
  uint16_t tag;
  int32_t x, y;
  uint32_t i;
  cache_elem_t *e0, *e1;
  uint32_t level;


  printf("***************************\n"
	 "*        TEST 2           *\n"
	 "***************************\n");

  level = 1;

  init_cache(&cache);
  printf("---- INITIAL ----\n");
  print_cache_stack(stdout, &cache);
  print_cache_bank(stdout, &cache);
  printf("\n");

  for (i=0; i<4000; i++) {
    tag = (uint16_t) (random() % NUMTAGS);
    x = (int32_t) (random() % 20);
    y = (int32_t) (random() % 20);
    e0 = cache_find(&cache, tag, x, y);
    e1 = cache_get(&cache, tag, x, y);

    if (e0 == NULL && e1->flag != NEW_CACHE_ELEM) {
      printf("*** caching bug ***\n");
      printf("  element: [%s %"PRId32" %"PRId32"]\n", tag2string[tag], x, y);
      printf("  find: returned NULL\n");
      printf("  get:  returned %p: ", e1);
      print_cache_elem(stdout, e1);
      printf("\n");
    }

    if (e0 != NULL && e1 != e0) {
      printf("*** caching bug ***\n");
      printf("  element: [%s %"PRId32" %"PRId32"]\n", tag2string[tag], x, y);
      printf("  find: returned %p: ", e0);
      print_cache_elem(stdout, e0);
      printf("\n");
      printf("  get:  returned %p: ", e1);
      print_cache_elem(stdout, e1);
      printf("\n");
    }

    if (e1->flag == NEW_CACHE_ELEM) {
      e1->flag = (uint16_t) level;
    }

    if (i % 100 == 49) {
      printf("\n--- Push to level %"PRIu32" ---\n", level);
      cache_push(&cache);
      level ++;
      print_cache_stack(stdout, &cache);
      print_cache_bank(stdout, &cache);
    }
  }

  printf("\n--- After random additions (level = %"PRIu32") ---\n", level);
  print_all_cache(stdout, &cache);
  print_cache_stack(stdout, &cache);
  print_cache_bank(stdout, &cache);

  while (level > 1) {
    level --;
    printf("\n--- Pop to level %"PRIu32" ---\n", level);
    cache_pop(&cache);
    print_all_cache(stdout, &cache);
    print_cache_stack(stdout, &cache);
    print_cache_bank(stdout, &cache);
  }

  printf("\n--- After reset ---\n");
  reset_cache(&cache);
  print_all_cache(stdout, &cache);
  print_cache_stack(stdout, &cache);
  print_cache_bank(stdout, &cache);

  delete_cache(&cache);
}
Example #20
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
	struct Scsi_Host *instance = cmd->device->host;
	struct a2091_hostdata *hdata = shost_priv(instance);
	struct WD33C93_hostdata *wh = &hdata->wh;
	struct a2091_scsiregs *regs = hdata->regs;
	unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
	unsigned long addr = virt_to_bus(cmd->SCp.ptr);

	/* don't allow DMA if the physical address is bad */
	if (addr & A2091_XFER_MASK) {
		wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
		wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
						GFP_KERNEL);

		/* can't allocate memory; use PIO */
		if (!wh->dma_bounce_buffer) {
			wh->dma_bounce_len = 0;
			return 1;
		}

		/* get the physical address of the bounce buffer */
		addr = virt_to_bus(wh->dma_bounce_buffer);

		/* the bounce buffer may not be in the first 16M of physmem */
		if (addr & A2091_XFER_MASK) {
			/* we could use chipmem... maybe later */
			kfree(wh->dma_bounce_buffer);
			wh->dma_bounce_buffer = NULL;
			wh->dma_bounce_len = 0;
			return 1;
		}

		if (!dir_in) {
			/* copy to bounce buffer for a write */
			memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
			       cmd->SCp.this_residual);
		}
	}

	/* setup dma direction */
	if (!dir_in)
		cntr |= CNTR_DDIR;

	/* remember direction */
	wh->dma_dir = dir_in;

	regs->CNTR = cntr;

	/* setup DMA *physical* address */
	regs->ACR = addr;

	if (dir_in) {
		/* invalidate any cache */
		cache_clear(addr, cmd->SCp.this_residual);
	} else {
		/* push any dirty cache */
		cache_push(addr, cmd->SCp.this_residual);
	}
	/* start DMA */
	regs->ST_DMA = 1;

	/* return success */
	return 0;
}
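
Example #20 only shows the setup half of the bounce-buffer dance. Below is a hedged sketch of the copy-back and cleanup that has to happen once the transfer finishes, using only the fields visible above (dma_dir, dma_bounce_buffer, dma_bounce_len, SCp.ptr, SCp.this_residual); the function name, the meaning of the status argument (nonzero on a completed transfer), and everything about stopping the DMA controller itself are assumptions, not the driver's actual dma_stop().

static void dma_bounce_cleanup(struct Scsi_Host *instance,
			       struct scsi_cmnd *cmd, int status)
{
	struct a2091_hostdata *hdata = shost_priv(instance);
	struct WD33C93_hostdata *wh = &hdata->wh;

	if (status && wh->dma_bounce_buffer) {
		/* a read went through the bounce buffer: copy it back out */
		if (wh->dma_dir)
			memcpy(cmd->SCp.ptr, wh->dma_bounce_buffer,
			       cmd->SCp.this_residual);
		kfree(wh->dma_bounce_buffer);
		wh->dma_bounce_buffer = NULL;
		wh->dma_bounce_len = 0;
	}
}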