Code example #1
static void mel_loglookup_with_offset(front_cep *cepobj,
                                      front_channel *channel)
/*
**  Power spectrum -> filter bank output (log taken via table lookup) */
{
  int ii;

  if (channel->shift > 0)
    for (ii = 0; ii < channel->num_freq; ii++)
    {
      channel->filterbank[ii] = (cepdata) log_lookup(&cepobj->logtab,
                                (int)(channel->filterbank[ii] +
                                      SHIFT_DOWN(cepobj->mel_offset[ii], channel->shift)),
                                channel->shift);
    }
  else
    for (ii = 0; ii < channel->num_freq; ii++)
    {
      channel->filterbank[ii] = (cepdata) log_lookup(&cepobj->logtab,
                                (int)(channel->filterbank[ii] +
                                      SHIFT_UP(cepobj->mel_offset[ii], -channel->shift)),
                                channel->shift);
    }

  return;
}
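SHIFT_DOWN and SHIFT_UP are not defined in this listing; from the way they are used here they behave as fixed-point scaling macros (divide or multiply by a power of two). The sketch below is a minimal, self-contained illustration under that assumption; the macro bodies and the offset value are illustrative, not copied from the SREC sources.

#include <stdio.h>

/* Assumed definitions, for illustration only (not copied from the project):
 * SHIFT_DOWN(x, n): divide x by 2^n with rounding; SHIFT_UP(x, n): multiply x by 2^n. */
#define SHIFT_DOWN(x, n) ((n) > 0 ? (((x) + (1 << ((n) - 1))) >> (n)) : (x))
#define SHIFT_UP(x, n)   ((n) > 0 ? ((x) << (n)) : (x))

int main(void)
{
  int offset = 1000;                        /* hypothetical mel_offset value */
  printf("%d\n", SHIFT_DOWN(offset, 3));    /* 1000 / 8, rounded -> 125      */
  printf("%d\n", SHIFT_UP(offset, 3));      /* 1000 * 8          -> 8000     */
  return 0;
}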
Code example #2
void inverse_transform_frame (preprocessed *prep, imeldata *fram, int do_shift)
/*
**  Note the matrix is the transpose of the transformation
**  To transform a single frame in place */
{
    int	     ii, jj;
    imeldata vec[MAX_DIMEN];

    ASSERT (prep);
    ASSERT (prep->dim < MAX_DIMEN);
    ASSERT (fram);

    if (prep->offset && do_shift)
	for (ii= 0; ii < prep->dim; ii++)
	    fram[ii] -= prep->offset[ii];

    for (ii= 0; ii < prep->dim; ii++) {
	vec[ii]= 0;
	for (jj= 0; jj < prep->dim; jj++)
	    vec[ii] += prep->invmat[ii][jj] * fram[jj];
	vec[ii]= SHIFT_DOWN (vec[ii], prep->inv_shift);
    /* floating-point equivalent:
    **   for (jj= 0; jj < prep->dim; jj++)
    **       vec[ii] += (imeldata)(prep->inverse[ii][jj] * fram[jj]);
    */
    }
    if (do_shift)
	for (ii= 0; ii < prep->dim; ii++)
	    fram[ii]= RANGE (vec[ii], 0, 255);
    else
	for (ii= 0; ii < prep->dim; ii++)
	    fram[ii]= vec[ii];
    return;
}
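The header comment above points out that the frame is transformed in place: the function accumulates into a temporary vector, rescales each entry with SHIFT_DOWN, and only then writes the result back into fram. A stripped-down sketch of that pattern, with hypothetical names and a fixed size rather than the SREC types:

#include <string.h>

#define DIM 3
/* assumed rounding right shift (shift > 0), as in the sketch after code example #1 */
#define SHIFT_DOWN(x, n) (((x) + (1 << ((n) - 1))) >> (n))

/* Multiply vec in place by an integer matrix that is scaled by 2^shift. */
static void transform_in_place(const int mat[DIM][DIM], int vec[DIM], int shift)
{
  int tmp[DIM];
  for (int ii = 0; ii < DIM; ii++) {
    tmp[ii] = 0;
    for (int jj = 0; jj < DIM; jj++)
      tmp[ii] += mat[ii][jj] * vec[jj];     /* accumulate into a temporary        */
    tmp[ii] = SHIFT_DOWN(tmp[ii], shift);   /* undo the fixed-point scale         */
  }
  memcpy(vec, tmp, sizeof(tmp));            /* write back only once all rows done */
}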
Code example #3
File: bcol_basesmuma_buf_mgmt.c | Project: IanYXXL/A1
/*
 * With support for nonblocking collectives, we don't have an upper
 * limit on the number of outstanding collectives per communicator.
 * Also, since we want to avoid communication to figure out which
 * buffers other ranks in the group will use, we will rely on the
 * fact that collective operations are called in the same order
 * in each process, to assign a unique ID to each collective operation.
 * We use this to create a static mapping from the index to the buffer
 * that will be used.  Also, because there is no limit to the number of
 * outstanding collective operations, we use a generation index for each
 * memory bank, so the collective will use the buffer only when the
 * correct generation of the bank is ready for use.
 */
int bcol_basesmuma_get_buff_index( sm_buffer_mgmt *buff_block,
                                   uint64_t buff_id )
{
    /* local variables */
    int memory_bank;
    uint64_t generation;
    int index=-1;


    /* get the bank index that will be used */
    memory_bank = buff_id & buff_block->mask;
    memory_bank = memory_bank SHIFT_DOWN buff_block->log2_num_buffs_per_mem_bank;

    /* get the generation of the bank this maps to */
    generation = buff_id SHIFT_DOWN (buff_block->log2_number_of_buffs);

    /* check to see if the bank is available */
    if (generation == buff_block->ctl_buffs_mgmt[memory_bank].bank_gen_counter) {

        /* get the buffer index that will be returned */
        index = buff_id & buff_block->mask;

        /* no in-use counter increment, as the mapping is static, and
         * all we need to know is the number of collectives that complete */

    } else {
        /* progress communications so that resources can be freed up */
        opal_progress();
    }

    /* return */
    return index;
}
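The block comment at the top of this example describes the scheme: the collective-operation ID (buff_id) is mapped statically to a buffer, and the high bits of the ID act as a generation number for the memory bank, so a buffer is only handed out once the bank has been recycled up to that generation. SHIFT_DOWN here evidently expands to a right-shift operator. The sketch below redoes the index arithmetic with plain shifts and hypothetical sizes (4 banks of 4 buffers, so mask = 0xF) to show how one buff_id splits into buffer index, bank, and generation.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
  /* Hypothetical geometry: 4 banks x 4 buffers = 16 buffers per generation. */
  const uint64_t mask = 0xF;                    /* 2^log2_number_of_buffs - 1 */
  const int log2_num_buffs_per_mem_bank = 2;    /* 4 buffers per bank         */
  const int log2_number_of_buffs = 4;           /* 16 buffers total           */

  for (uint64_t buff_id = 0; buff_id < 40; buff_id += 9) {
    uint64_t index      = buff_id & mask;                        /* which buffer      */
    uint64_t bank       = index >> log2_num_buffs_per_mem_bank;  /* which memory bank */
    uint64_t generation = buff_id >> log2_number_of_buffs;       /* which bank cycle  */
    printf("buff_id=%2" PRIu64 " -> index=%2" PRIu64 " bank=%" PRIu64 " generation=%" PRIu64 "\n",
           buff_id, index, bank, generation);
  }
  return 0;
}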
Code example #4
void linear_transform_frame(preprocessed *prep, imeldata *fram, int do_shift)
/*
**  Note the matrix is the transpose of the transformation
**  To transform a single frame in place */
{
  int      ii, jj;
  imeldata vec[MAX_DIMEN];
  int      dim;

  ASSERT(prep);
  ASSERT(prep->dim < MAX_DIMEN);
  ASSERT(fram);
  dim = prep->dim;   /* read the dimension only after prep has been validated */
  for (ii = 0; ii < dim; ii++)
  {
    vec[ii] = 0;
    for (jj = 0; jj < prep->dim; jj++)
      vec[ii] += prep->matrix[ii][jj] * fram[jj];
    ASSERT(prep->imel_shift > 0);
    vec[ii] = (imeldata) SHIFT_DOWN((int)vec[ii],
                                    (unsigned int)prep->imel_shift);
  }

  if (do_shift)
  {
    if (prep->offset)
      for (ii = 0; ii < dim; ii++)
        fram[ii] = RANGE(vec[ii] + prep->offset[ii], 0, 255);
    else
      for (ii = 0; ii < dim; ii++)
        fram[ii] = RANGE(vec[ii], 0, 255);
  }
  else
  {
    for (ii = 0; ii < dim; ii++)
      fram[ii] = vec[ii];
  }
  return;
}
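RANGE is also undefined in this listing; from its use it saturates the (optionally offset) result into 0..255, presumably so the transformed frame fits back into byte-sized feature storage. A plausible definition, given only as an assumption:

/* assumed definition: clamp x into [lo, hi] */
#define RANGE(x, lo, hi) ((x) < (lo) ? (lo) : ((x) > (hi) ? (hi) : (x)))

/* e.g. RANGE(-7, 0, 255) == 0,  RANGE(300, 0, 255) == 255,  RANGE(42, 0, 255) == 42 */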
Code example #5
File: bcol_basesmuma_buf_mgmt.c | Project: IanYXXL/A1
/* release the shared memory buffers
 *  buf_id is the unique ID assigned to the particular buffer
 */
int bcol_basesmuma_free_buff( sm_buffer_mgmt * buff_block,
                              uint64_t buff_id )
{
    /* local variables */
    int ret=OMPI_SUCCESS;
    int memory_bank;
    uint64_t generation;
    mca_bcol_basesmuma_component_t *cs = &mca_bcol_basesmuma_component;

    /* get the bank index that will be used */
    memory_bank = buff_id & buff_block->mask;
    memory_bank = memory_bank SHIFT_DOWN buff_block->log2_num_buffs_per_mem_bank;

    /* get the generation of the bank this maps to */
    generation = buff_id SHIFT_DOWN (buff_block->log2_number_of_buffs);

    /* the generation counter should not change until all resources
     *   associated with this bank have been freed.
     */
    assert(generation == buff_block->ctl_buffs_mgmt[memory_bank].bank_gen_counter);

    /*
     * increment counter of completed buffers
     */
    OPAL_THREAD_ADD32(&(buff_block->ctl_buffs_mgmt[memory_bank].n_buffs_freed),
                      1);

    /*
     * If I am the last to check in, initiate resource recycling
     */
    if( buff_block->ctl_buffs_mgmt[memory_bank].n_buffs_freed ==
        buff_block->ctl_buffs_mgmt[memory_bank].number_of_buffers ) {

        /* Lock to ensure atomic recycling of resources */
        OPAL_THREAD_LOCK(&(buff_block->ctl_buffs_mgmt[memory_bank].mutex));

        /* make sure someone else did not already get to this */
        if( buff_block->ctl_buffs_mgmt[memory_bank].n_buffs_freed !=
            buff_block->ctl_buffs_mgmt[memory_bank].number_of_buffers ) {
            /* release lock and exit */
            OPAL_THREAD_UNLOCK(&(buff_block->ctl_buffs_mgmt[memory_bank].mutex));
        } else {
            sm_nbbar_desc_t *p_sm_nb_desc = NULL;
            /* initiate the freeing of resources.  Need to make sure the other
             * ranks in the group are also done with their resources before this
             * block is made available for use again.
             * No one else will try to allocate from this block or free back to
             * this block until the bank's generation counter has been incremented,
             * so we just reset the number of freed buffers to 0, so that no one
             * else will also try to initiate the recycling of these resources.
             */
            buff_block->ctl_buffs_mgmt[memory_bank].n_buffs_freed=0;

            /* Start the nonblocking barrier */
            p_sm_nb_desc = &(buff_block->ctl_buffs_mgmt[memory_bank].nb_barrier_desc);
            p_sm_nb_desc->coll_buff = buff_block;
            bcol_basesmuma_rd_nb_barrier_init_admin(p_sm_nb_desc);

            if( NB_BARRIER_DONE !=
                buff_block->ctl_buffs_mgmt[memory_bank].
                nb_barrier_desc.collective_phase) {

                opal_list_t *list=&(cs->nb_admin_barriers);
                opal_list_item_t *append_item;

                /* put this onto the progression list */
                OPAL_THREAD_LOCK(&(cs->nb_admin_barriers_mutex));
                append_item=(opal_list_item_t *)
                    &(buff_block->ctl_buffs_mgmt[memory_bank].nb_barrier_desc);
                opal_list_append(list,append_item);
                OPAL_THREAD_UNLOCK(&(cs->nb_admin_barriers_mutex));
                /* progress communications so that resources can be freed up */
                opal_progress();
            } else {
                /* mark the block as available */
                (buff_block->ctl_buffs_mgmt[memory_bank].bank_gen_counter)++;
            }

            /* get out of here */
            OPAL_THREAD_UNLOCK(&(buff_block->ctl_buffs_mgmt[memory_bank].mutex));
        }

    }

    /* return */
    return ret;
}
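The release path above follows a check-then-lock-then-recheck pattern: every caller increments n_buffs_freed, and only a caller that sees the count reach number_of_buffers takes the bank mutex, re-checks under the lock, resets the count, and then either queues a nonblocking admin barrier or bumps the bank's generation counter. The sketch below is a heavily simplified version of that pattern using C11 atomics and pthreads (hypothetical names, no Open MPI types); the nonblocking inter-process barrier is collapsed into an immediate generation bump.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
  atomic_int      n_freed;     /* buffers returned so far in the current generation */
  int             n_total;     /* buffers handed out per generation                  */
  atomic_uint     generation;  /* incremented once a whole bank has been returned    */
  pthread_mutex_t mutex;       /* serializes the recycling step                      */
} bank_t;

/* Called once for every buffer that is handed back to the bank. */
static void free_one(bank_t *bank)
{
  int freed = atomic_fetch_add(&bank->n_freed, 1) + 1;

  if (freed == bank->n_total) {                            /* I may be the last one in */
    pthread_mutex_lock(&bank->mutex);
    if (atomic_load(&bank->n_freed) == bank->n_total) {    /* re-check under the lock  */
      atomic_store(&bank->n_freed, 0);                     /* reset before publishing  */
      atomic_fetch_add(&bank->generation, 1);              /* bank is reusable again   */
    }
    pthread_mutex_unlock(&bank->mutex);
  }
}

int main(void)
{
  bank_t bank = { .n_total = 4 };
  pthread_mutex_init(&bank.mutex, NULL);

  for (int i = 0; i < 8; i++)   /* return two full generations' worth of buffers */
    free_one(&bank);

  printf("generation = %u\n", atomic_load(&bank.generation));   /* prints 2 */
  pthread_mutex_destroy(&bank.mutex);
  return 0;
}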