Example #1
int main()
{
    linear_searchspace<5, int> lsp{};
    auto t = lsp(0, 1, 2, 3, 4);
    std::cout << std::get<1>(t) << std::endl;

    auto t2 = lsp(4, 3, 2, 1, 0);
    std::cout << std::get<0>(t2) << std::endl;
}
/**
@brief This function demonstrates how to perform "Textless Lipsync"

Given an audio file, this method will use SAPI to guess at the words,
word timings, and phoneme timings in the specified audio file.
The results are written to an XML file named <outFile>.phonemes.xml.

This is the best code to look at first.

@param strAudioFile - [in] audio file
@param outFile      - [in] output file name prefix
**/
void
run_sapi_textless_lipsync(std::wstring& strAudioFile, std::wstring outFile)
{
	// 1. [optional] declare the SAPI 5.1 estimator. 
	// NOTE: for different phoneme sets: create a new estimator
	phoneme_estimator sapi51Estimator;

	// 2. declare the sapi lipsync object and call the lipsync method to
	// start the lipsync process
	sapi_textless_lipsync lsp(&sapi51Estimator);
	if (lsp.lipsync(strAudioFile))
    {
		// 3. Run the message loop and wait till the lipsync is finished
        run_lipsync_message_loop(lsp);
		
		// 4. finalize the lipsync results for printing
		// this call will estimate phoneme timings 
		lsp.finalize_phoneme_alignment();

		// 5. print the results to the output stream
		//lsp.print_results(std::cout);
		
		// 5'. print results in a file
		std::wstring strOutFile = outFile + L".phonemes.xml";
		std::ofstream outstream(strOutFile.c_str());
		outstream << "<PhonemeTimings audiofile=\"" << wstring_2_string(strAudioFile) << "\" >" << std::endl;
		lsp.print_results(outstream);
		outstream << "</PhonemeTimings>" << std::endl;
		outstream.close();            
	}
	else
	{
		std::wcerr << lsp.getErrorString() << std::endl;
	}
}
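
A minimal driver sketch for the helper above (hypothetical and not part of the original sample; it assumes run_sapi_textless_lipsync is declared in a project header and that the lipsync classes handle COM/SAPI initialization themselves):

// Hypothetical driver: takes the audio file and an output prefix from the
// command line and runs the textless lipsync helper defined above.
int wmain(int argc, wchar_t* argv[])
{
	if (argc < 3)
	{
		std::wcerr << L"usage: lipsync_textless <audio.wav> <output-prefix>" << std::endl;
		return 1;
	}
	std::wstring strAudioFile(argv[1]);
	std::wstring outFile(argv[2]);
	run_sapi_textless_lipsync(strAudioFile, outFile);
	return 0;
}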
Example #3
static int handle_lsp_request(struct mg_connection *conn, const char *path,
                               struct file *filep, struct lua_State *ls) {
  void *p = NULL;
  lua_State *L = NULL;
  FILE *fp = NULL;
  int error = 1;

  // We need both mg_stat to get file size, and mg_fopen to get fd
  if (!mg_stat(path, filep) || (fp = mg_fopen(path, "r")) == NULL) {
    lsp_send_err(conn, ls, "File [%s] not found", path);
  } else if ((p = mmap(NULL, (size_t) filep->size, PROT_READ, MAP_PRIVATE,
                       fileno(fp), 0)) == MAP_FAILED) {
    lsp_send_err(conn, ls, "mmap(%s, %zu, %d): %s", path, (size_t) filep->size,
              fileno(fp), strerror(errno));
  } else if ((L = ls != NULL ? ls : luaL_newstate()) == NULL) {
    send_http_error(conn, 500, http_500_error, "%s", "luaL_newstate failed");
  } else {
    // We're not sending HTTP headers here, Lua page must do it.
    if (ls == NULL) {
      prepare_lua_environment(conn, L);
    }
    error = lsp(conn, path, p, filep->size, L);
  }

  if (L != NULL && ls == NULL) lua_close(L);
  if (p != NULL && p != MAP_FAILED) munmap(p, filep->size);
  if (fp != NULL) fclose(fp);

  return error;
}
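
The stat-then-mmap pattern used by the handler above, shown as a standalone POSIX sketch (independent of the mongoose/Lua helpers; error handling reduced to returning NULL):

/* Standalone sketch: map a whole file read-only with plain POSIX calls.
 * The caller is responsible for munmap(buf, size) when finished. */
#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

static void *map_whole_file(const char *path, size_t *size_out) {
  struct stat st;
  void *p;
  int fd = open(path, O_RDONLY);
  if (fd == -1) return NULL;
  if (fstat(fd, &st) == -1 || st.st_size == 0) { close(fd); return NULL; }
  p = mmap(NULL, (size_t) st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
  close(fd);  /* the mapping stays valid after the descriptor is closed */
  if (p == MAP_FAILED) return NULL;
  *size_out = (size_t) st.st_size;
  return p;
}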
Example #4
REPLACE_STATIC void handle_lsp_request(struct mg_connection *conn, const char *path,
                               struct file *filep) {
  void *p = NULL;
  lua_State *L = NULL;

  if (!mg_fopen(conn, path, "r", filep)) {
    send_http_error(conn, 404, "Not Found", "%s", "File not found");
  } else if (filep->membuf == NULL &&
             (p = mmap(NULL, filep->size, PROT_READ, MAP_PRIVATE,
                       fileno(filep->fp), 0)) == MAP_FAILED) {
    send_http_error(conn, 500, http_500_error, "%s", "x");
  } else if ((L = luaL_newstate()) == NULL) {
    send_http_error(conn, 500, http_500_error, "%s", "y");
  } else {
    mg_printf(conn, "%s", "HTTP/1.1 200 OK\r\n"
              "Content-Type: text/html\r\nConnection: close\r\n\r\n");
    prepare_lua_environment(conn, L);
    conn->request_info.ev_data = L;
    call_user(conn, MG_INIT_LUA);
    lsp(conn, filep->membuf == NULL ? p : filep->membuf, filep->size, L);
  }

  if (L) lua_close(L);
  if (p && p != MAP_FAILED) munmap(p, filep->size);
  mg_fclose(filep);
}
Example #5
int main(int argc, char** argv)
{

 ros::init(argc, argv, "laser_roi_node");
 ros::NodeHandle n;
 LaserRoiNode lsp(n);
 ROS_INFO("laser_roi_node initialized.\n");
 ros::spin();

 return 0;
}
Example #6
int main(void)
{
  LSA_HANDLE  lsa_handle;
  BOOL        success;

  if (!lsa_open(&lsa_handle))
    return 1;

  success = lsp(lsa_handle);

  lsa_close(lsa_handle);
  return success ? 0 : 1;
}
/**
@brief This function demonstrates how to perform "Text Based Lipsync"

Given an audio file and a text file, this method will generate word timings
and phoneme timings by aligning the text transcript to the audio file using
sapi_lipsync and helpers.

This is the best code to look at first.

The results are written to an XML file named <outFile>.phonemes.xml.
@param strAudioFile - [in] audio file
@param strTextFile  - [in] text transcript file
@param outFile      - [in] output file name prefix
**/
void
run_sapi_textbased_lipsync(std::wstring& strAudioFile, TCHAR *strTextFile, std::wstring outFile)
{
	// 1. [optional] declare the SAPI 5.1 estimator. 
	// NOTE: for different phoneme sets, create a new estimator
	phoneme_estimator sapi51Estimator;

	// 2. Load the text file into memory
	WCHAR * pwszCoMem = 0;
	ULONG cch = 0;
    HRESULT hr = GetTextFile(strTextFile, &pwszCoMem, &cch);
	if (hr == S_OK)
    {
		std::wstring strText(pwszCoMem, cch);

		// 3. declare the sapi lipsync object and call the lipsync method 
		// to start the lipsync process
        sapi_textbased_lipsync lsp(&sapi51Estimator);
		if (lsp.lipsync(strAudioFile, strText))
		{

			// 4. Run the message loop and wait till the lipsync is 
			// finished
			run_lipsync_message_loop(lsp);
              
			// 5. finalize the lipsync results for printing
			// this call will estimate phoneme timings 
			lsp.finalize_phoneme_alignment();
			
			// 6. print the results to the output stream
			//lsp.print_results(std::cout);
			
			// 6'. print results in a file
			std::wstring strOutFile = outFile + L".phonemes.xml";
			std::ofstream outstream(strOutFile.c_str());
			outstream << "<PhonemeTimings audiofile=\"" << wstring_2_string(strAudioFile) << "\" >" << std::endl;
			lsp.print_results(outstream);
			outstream << "</PhonemeTimings>" << std::endl;
			outstream.close();             
		}
		else
		{
			std::wcerr << lsp.getErrorString() << std::endl;			
		}
	}
	else
	{
		std::wcerr << L"Can't open text transcript file" << std::endl;
	}
}
void LanguageServerCluster::StartServer(const LanguageServerEntry& entry)
{
    if(entry.IsEnabled()) {
        if(!entry.IsValid()) {
            clWARNING() << "LSP Server" << entry.GetName()
                        << "is not valid and it will not be started (one of the specified paths do not "
                           "exist)";
            LanguageServerConfig::Get().GetServers()[entry.GetName()].SetEnabled(false);
            LanguageServerConfig::Get().Save();
            return;
        }

        LanguageServerProtocol::Ptr_t lsp(new LanguageServerProtocol(entry.GetName(), entry.GetNetType(), this));
        lsp->SetPriority(entry.GetPriority());
        lsp->SetDisaplayDiagnostics(entry.IsDisaplayDiagnostics());
        lsp->SetUnimplementedMethods(entry.GetUnimplementedMethods());

        wxArrayString lspCommand;
        lspCommand.Add(entry.GetExepath());

        if(!entry.GetArgs().IsEmpty()) {
            wxArrayString args = ::wxStringTokenize(entry.GetArgs(), " ", wxTOKEN_STRTOK);
            lspCommand.insert(lspCommand.end(), args.begin(), args.end());
        }

        wxString rootDir;
        if(clWorkspaceManager::Get().GetWorkspace()) {
            rootDir = clWorkspaceManager::Get().GetWorkspace()->GetFileName().GetPath();
        }
        clDEBUG() << "Starting lsp:";
        clDEBUG() << "Connection string:" << entry.GetConnectionString();
        clDEBUG() << "lspCommand:" << lspCommand;
        clDEBUG() << "entry.GetWorkingDirectory():" << entry.GetWorkingDirectory();
        clDEBUG() << "rootDir:" << rootDir;
        clDEBUG() << "entry.GetLanguages():" << entry.GetLanguages();

        size_t flags = 0;
        lsp->Start(lspCommand, entry.GetConnectionString(), entry.GetWorkingDirectory(), rootDir, entry.GetLanguages(),
                   flags);
        m_servers.insert({ entry.GetName(), lsp });
    }
}
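
The command-line assembly in the middle of StartServer (executable path first, then the configured arguments split on whitespace) corresponds to the following standalone C++ sketch, written without the wxWidgets types used in the original:

#include <sstream>
#include <string>
#include <vector>

// Standalone sketch of the command assembly: the server executable comes
// first, followed by its arguments tokenized on whitespace.
static std::vector<std::string> BuildLspCommand(const std::string& exePath,
                                                const std::string& args)
{
    std::vector<std::string> cmd;
    cmd.push_back(exePath);
    std::istringstream tokens(args);
    std::string tok;
    while(tokens >> tok) {
        cmd.push_back(tok); // e.g. "--stdio" or "--log=verbose" (illustrative values)
    }
    return cmd;
}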
Example #9
void pthread_sync::dump_measurements() {
#if DO_LIBPDTHREAD_MEASUREMENTS == 1
    
    char outfile[256], summary[256];
    FILE* out_file;

    snprintf(outfile, 255, "lock-stats-for-%s-%d:%d.txt\0", comment, getpid(), thr_self());
    
    out_file = fopen(outfile, "w+");    
    
    lock_stats_printer lsp(out_file, &lock_stats, comment);
    
    lock_stats.map_keys(&lsp);

    fprintf(out_file, "total ops on this monitor: %d\n\0", lsp.get_total_ops());

    fclose(out_file);
    
#endif /* DO_LIBPDTHREAD_MEASUREMENTS == 1 */
}
Example #10
/******************************************
 * Patch the parent of declarations to be the new function literal.
 */
void lambdaSetParent(Expression *e, Scope *sc)
{
    class LambdaSetParent : public StoppableVisitor
    {
        Scope *sc;
    public:
        LambdaSetParent(Scope *sc) : sc(sc) {}

        void visit(Expression *)
        {
        }

        void visit(DeclarationExp *e)
        {
            e->declaration->parent = sc->parent;
        }

        void visit(IndexExp *e)
        {
            if (e->lengthVar)
            {
                //printf("lengthVar\n");
                e->lengthVar->parent = sc->parent;
            }
        }

        void visit(SliceExp *e)
        {
            if (e->lengthVar)
            {
                //printf("lengthVar\n");
                e->lengthVar->parent = sc->parent;
            }
        }
    };

    LambdaSetParent lsp(sc);
    walkPostorder(e, &lsp);
}
  __host__ __device__ void benchmark_matcher_kernel(thread_info<target> ti,
                                                  i_short2* particles_vec,
                                                  unsigned n_particles,
                                                  F f,
                                                  kernel_image2d<T> particles,
                                                  kernel_image2d<T> new_particles,
                                                  kernel_image2d<i_short2> matches,
                                                  kernel_image2d<typename F::feature_t> states,
                                                  const kernel_image2d<i_short2> ls_matches,
                                                  i_short2 mvt,
                                                  T* compact_particles
                                                  ,kernel_image2d<i_float4> dist
                                                  )
  {
    unsigned threadid = ti.blockIdx.x * ti.blockDim.x + ti.threadIdx.x;
    if (threadid >= n_particles) return;
    point2d<int> p = particles_vec[threadid];

    if (!particles.has(p))
      return;

    matches(p) = i_short2(-1, -1);

    if (p.row() < 6 || p.row() >= particles.domain().nrows() - 6 ||
        p.col() < 6 || p.col() >= particles.domain().ncols() - 6)
    {
      new_particles(p).age = 0;
      return;
    }

    if (particles(p).age == 0)
      return;

    // Search for the best match in the neighborhood.
    short p_age = particles(p).age;
    float p_pertinence = particles(p).pertinence;
    typename F::feature_t p_state = states(p);

    point2d<int> match = p;

    bool ls_p_found = false;

    float match_distance;
    point2d<int> prediction = i_int2(p);
    if (p_age == 1)
    {
      point2d<int> lsp(p.row() / 2, p.col() / 2);
      point2d<int> ls_pred;
      {for_all_in_static_neighb2d(lsp, n, c25)
        {
          if (ls_matches(n).x != -1)
          {
            ls_pred = point2d<int>(ls_matches(n).x * 2,
                                   ls_matches(n).y * 2);
            ls_p_found = true;
          }
        }
      }

      match = p;

      match_distance = f.distance_linear(p_state, p);
    }
Example #12
// This function will be called by mongoose on every new request.
int web_engine::begin_request_handler(struct mg_connection *conn)
{
	astring file_path(mg_get_option(m_server, "document_root"), PATH_SEPARATOR, conn->uri);
	if (filename_endswith(file_path.c_str(), ".lp"))
	{
		FILE *fp = NULL;
		if ((fp = fopen(file_path.c_str(), "rb")) != NULL) {
			fseek(fp, 0, SEEK_END);
			size_t size = ftell(fp);
			fseek(fp, 0, SEEK_SET);
			char *data = (char*)mg_mmap(fp, size);

			lua_State *L = luaL_newstate();
			prepare_lua_environment(conn, L);
			lsp(conn, data, (int) size, L);
			if (L != NULL) lua_close(L);
			mg_munmap(data, size);
			fclose(fp);
			return MG_TRUE;
		} else {
			return MG_FALSE;
		}
	}
	else if (!strncmp(conn->uri, "/json/",6))
	{
		if (!strcmp(conn->uri, "/json/game"))
		{
			return json_game_handler(conn);
		}
		if (!strcmp(conn->uri, "/json/slider"))
		{
			return json_slider_handler(conn);
		}
	}
	else if (!strncmp(conn->uri, "/keypost",8))
	{
		// Is there any sane way to determine the length of the buffer before getting it?
		// A request for a way was previously filed with the mongoose devs,
		// but it looks like it was never implemented.

		// For now, we'll allow a paste buffer of 32k.
		// To-do: Send an error if the paste is too big?
		char cmd_val[32768];

		int pastelength = mg_get_var(conn, "val", cmd_val, sizeof(cmd_val));
		if (pastelength > 0) {
			machine().ioport().natkeyboard().post_utf8(cmd_val);
		}
		// Send HTTP reply to the client
		mg_printf(conn,
			"HTTP/1.1 200 OK\r\n"
			"Content-Type: text/plain\r\n"
			"Content-Length: 2\r\n"        // Always set Content-Length
			"\r\n"
			"OK");

		// Returning non-zero tells mongoose that our function has replied to
		// the client, and mongoose should not send client any more data.
		return MG_TRUE;
	}
	else if (!strncmp(conn->uri, "/keyupload",8))
	{
		char *upload_data;
		int data_length, ofs = 0;
		char var_name[100], file_name[255];
		while ((ofs = mg_parse_multipart(conn->content + ofs, conn->content_len - ofs, var_name, sizeof(var_name), file_name, sizeof(file_name), (const char **)&upload_data, &data_length)) > 0) {
				mg_printf_data(conn, "File: %s, size: %d bytes", file_name, data_length);
		}

		// That upload_data contains more than we need. It also has the headers.
		// We'll need to strip it down to just what we want.

		if ((data_length > 0) && (file_name[0] != '\0'))
		{
			// MSVC doesn't yet support variable-length arrays, so chop the string the old-fashioned way
			upload_data[data_length] = '\0';

			// Now paste the stripped down paste_data..
			machine().ioport().natkeyboard().post_utf8(upload_data);
		}
		return MG_TRUE;
	}
	else if (!strncmp(conn->uri, "/cmd",4))
	{
		char cmd_name[64];
		mg_get_var(conn, "name", cmd_name, sizeof(cmd_name));

		if(!strcmp(cmd_name,"softreset"))
		{
			m_machine->schedule_soft_reset();
		}
		else if(!strcmp(cmd_name,"hardreset"))
		{
			m_machine->schedule_hard_reset();
		}
		else if(!strcmp(cmd_name,"exit"))
		{
			m_machine->schedule_exit();
		}
		else if(!strcmp(cmd_name,"togglepause"))
		{
			if (m_machine->paused())
				m_machine->resume();
			else
				m_machine->pause();
		}
		else if(!strcmp(cmd_name,"savestate"))
		{
			char cmd_val[64];
			mg_get_var(conn, "val", cmd_val, sizeof(cmd_val));
			char *filename = websanitize_statefilename(cmd_val);
			m_machine->schedule_save(filename);
		}
		else if(!strcmp(cmd_name,"loadstate"))
		{
			char cmd_val[64];
			mg_get_var(conn, "val", cmd_val, sizeof(cmd_val));
			char *filename = cmd_val;
			m_machine->schedule_load(filename);
		}
		else if(!strcmp(cmd_name,"loadauto"))
		{
			// This is here to just load the autosave and only the autosave.
			m_machine->schedule_load("auto");
		}

		// Send HTTP reply to the client
		mg_printf(conn,
				"HTTP/1.1 200 OK\r\n"
				"Content-Type: text/plain\r\n"
				"Content-Length: 2\r\n"        // Always set Content-Length
				"\r\n"
				"OK");

		// Returning non-zero tells mongoose that our function has replied to
		// the client, and mongoose should not send client any more data.
		return MG_TRUE;
	}
	else if (!strncmp(conn->uri, "/slider",7))
	{
		char cmd_id[64];
		char cmd_val[64];
		mg_get_var(conn, "id", cmd_id, sizeof(cmd_id));
		mg_get_var(conn, "val", cmd_val, sizeof(cmd_val));
		int cnt = 0;
		int id = atoi(cmd_id);
		const slider_state *curslider;
		for (curslider = machine().ui().get_slider_list(); curslider != NULL; curslider = curslider->next)
		{
			if (cnt==id)
				(*curslider->update)(machine(), curslider->arg, NULL, atoi(cmd_val));
			cnt++;
		}
		for (curslider = (slider_state*)machine().osd().get_slider_list(); curslider != NULL; curslider = curslider->next)
		{
			if (cnt==id)
				(*curslider->update)(machine(), curslider->arg, NULL, atoi(cmd_val));
			cnt++;
		}

		// Send HTTP reply to the client
		mg_printf(conn,
				"HTTP/1.1 200 OK\r\n"
				"Content-Type: text/plain\r\n"
				"Content-Length: 2\r\n"        // Always set Content-Length
				"\r\n"
				"OK");

		// Returning non-zero tells mongoose that our function has replied to
		// the client, and mongoose should not send client any more data.
		return MG_TRUE;
	}
	else if (!strncmp(conn->uri, "/screenshot.png",15))
	{
		screen_device_iterator iter(m_machine->root_device());
		screen_device *screen = iter.first();

		if (screen == NULL)
		{
			return 0;
		}

		astring fname("screenshot.png");
		emu_file file(m_machine->options().snapshot_directory(), OPEN_FLAG_WRITE | OPEN_FLAG_CREATE | OPEN_FLAG_CREATE_PATHS);
		file_error filerr = file.open(fname.c_str());

		if (filerr != FILERR_NONE)
		{
			return 0;
		}

		m_machine->video().save_snapshot(screen, file);
		astring fullpath(file.fullpath());
		file.close();
		mg_send_header(conn, "Cache-Control", "no-cache, no-store, must-revalidate");
		mg_send_header(conn, "Pragma", "no-cache");
		mg_send_header(conn, "Expires", "0");
		mg_send_file(conn, fullpath.c_str(), NULL);
		return MG_MORE; // It is important to return MG_MORE after mg_send_file!
	}
	return 0;
}
/***************************************************************************
 *   FUNCTION: cod_amr
 *
 *   PURPOSE:  Main encoder routine.
 *
 *   DESCRIPTION: This function is called every 20 ms speech frame,
 *       operating on the newly read 160 speech samples. It performs the
 *       principal encoding functions to produce the set of encoded parameters
 *       which include the LSP, adaptive codebook, and fixed codebook
 *       quantization indices (addresses and gains).
 *
 *   INPUTS:
 *       No input arguments are passed to this function. However, before
 *       calling this function, 160 new speech samples should be copied to
 *       the vector new_speech[]. This is a global pointer which is declared
 *       in this file (it points to the end of the speech buffer minus 160).
 *
 *   OUTPUTS:
 *
 *       ana[]:     vector of analysis parameters.
 *       synth[]:   Local synthesis speech (for debugging purposes)
 *
 ***************************************************************************/
int cod_amr(
    cod_amrState *st,          /* i/o : State struct                   */
    enum Mode mode,            /* i   : AMR mode                       */
    Word16 new_speech[],       /* i   : speech input (L_FRAME)         */
    Word16 ana[],              /* o   : Analysis parameters            */
    enum Mode *usedMode,       /* o   : used mode                    */
    Word16 synth[]             /* o   : Local synthesis                */
)
{
   /* LPC coefficients */
   Word16 A_t[(MP1) * 4];      /* A(z) unquantized for the 4 subframes */
   Word16 Aq_t[(MP1) * 4];     /* A(z)   quantized for the 4 subframes */
   Word16 *A, *Aq;             /* Pointer on A_t and Aq_t              */
   Word16 lsp_new[M];
   
   /* Other vectors */
   Word16 xn[L_SUBFR];         /* Target vector for pitch search       */
   Word16 xn2[L_SUBFR];        /* Target vector for codebook search    */
   Word16 code[L_SUBFR];       /* Fixed codebook excitation            */
   Word16 y1[L_SUBFR];         /* Filtered adaptive excitation         */
   Word16 y2[L_SUBFR];         /* Filtered fixed codebook excitation   */
   Word16 gCoeff[6];           /* Correlations between xn, y1, & y2:   */
   Word16 res[L_SUBFR];        /* Short term (LPC) prediction residual */
   Word16 res2[L_SUBFR];       /* Long term (LTP) prediction residual  */

   /* Vector and scalars needed for the MR475 */
   Word16 xn_sf0[L_SUBFR];     /* Target vector for pitch search       */
   Word16 y2_sf0[L_SUBFR];     /* Filtered codebook innovation         */   
   Word16 code_sf0[L_SUBFR];   /* Fixed codebook excitation            */
   Word16 h1_sf0[L_SUBFR];     /* The impulse response of sf0          */
   Word16 mem_syn_save[M];     /* Filter memory                        */
   Word16 mem_w0_save[M];      /* Filter memory                        */
   Word16 mem_err_save[M];     /* Filter memory                        */
   Word16 sharp_save;          /* Sharpening                           */
   Word16 evenSubfr;           /* Even subframe indicator              */ 
   Word16 T0_sf0 = 0;          /* Integer pitch lag of sf0             */  
   Word16 T0_frac_sf0 = 0;     /* Fractional pitch lag of sf0          */  
   Word16 i_subfr_sf0 = 0;     /* Position in exc[] for sf0            */
   Word16 gain_pit_sf0;        /* Quantized pitch gain for sf0         */
   Word16 gain_code_sf0;       /* Quantized codebook gain for sf0      */
    
   /* Scalars */
   Word16 i_subfr, subfrNr;
   Word16 T_op[L_FRAME/L_FRAME_BY2];
   Word16 T0, T0_frac;
   Word16 gain_pit, gain_code;

   /* Flags */
   Word16 lsp_flag = 0;        /* indicates resonance in LPC filter */   
   Word16 gp_limit;            /* pitch gain limit value            */
   Word16 vad_flag;            /* VAD decision flag                 */
   Word16 compute_sid_flag;    /* SID analysis  flag                 */

   Copy(new_speech, st->new_speech, L_FRAME);

   *usedMode = mode;                     move16 ();

   /* DTX processing */
   if (st->dtx)
   {  /* no test() call since this if is only in simulation env */
      /* Find VAD decision */

#ifdef  VAD2
      vad_flag = vad2 (st->new_speech,    st->vadSt);
      vad_flag = vad2 (st->new_speech+80, st->vadSt) || vad_flag;      logic16();
#else
      vad_flag = vad1(st->vadSt, st->new_speech);     
#endif
      fwc ();                 /* function worst case */

      /* NB! usedMode may change here */
      compute_sid_flag = tx_dtx_handler(st->dtx_encSt,
                                        vad_flag, 
                                        usedMode);
   }
   else 
   {
      compute_sid_flag = 0;              move16 ();
   }
   
   /*------------------------------------------------------------------------*
    *  - Perform LPC analysis:                                               *
    *       * autocorrelation + lag windowing                                *
    *       * Levinson-durbin algorithm to find a[]                          *
    *       * convert a[] to lsp[]                                           *
    *       * quantize and code the LSPs                                     *
    *       * find the interpolated LSPs and convert to a[] for all          *
    *         subframes (both quantized and unquantized)                     *
    *------------------------------------------------------------------------*/
   
   /* LP analysis */
   lpc(st->lpcSt, mode, st->p_window, st->p_window_12k2, A_t);

   fwc ();                 /* function worst case */

   /* From A(z) to lsp. LSP quantization and interpolation */
   lsp(st->lspSt, mode, *usedMode, A_t, Aq_t, lsp_new, &ana);
   
   fwc ();                 /* function worst case */

   /* Buffer lsp's and energy */
   dtx_buffer(st->dtx_encSt,
	      lsp_new,
	      st->new_speech);

   /* Check if in DTX mode */
   test();
   if (sub(*usedMode, MRDTX) == 0)
   {
      dtx_enc(st->dtx_encSt,
              compute_sid_flag,
              st->lspSt->qSt, 
              st->gainQuantSt->gc_predSt,
              &ana);
      
      Set_zero(st->old_exc,    PIT_MAX + L_INTERPOL);
      Set_zero(st->mem_w0,     M);
      Set_zero(st->mem_err,    M);
      Set_zero(st->zero,       L_SUBFR);
      Set_zero(st->hvec,       L_SUBFR);    /* set to zero "h1[-L_SUBFR..-1]" */
      /* Reset lsp states */
      lsp_reset(st->lspSt);
      Copy(lsp_new, st->lspSt->lsp_old, M);
      Copy(lsp_new, st->lspSt->lsp_old_q, M);
      
      /* Reset clLtp states */
      cl_ltp_reset(st->clLtpSt);
      st->sharp = SHARPMIN;       move16 ();
   }
   else
   {
       /* check resonance in the filter */
      lsp_flag = check_lsp(st->tonStabSt, st->lspSt->lsp_old);  move16 ();
   }
   
   /*----------------------------------------------------------------------*
    * - Find the weighted input speech w_sp[] for the whole speech frame   *
    * - Find the open-loop pitch delay for first 2 subframes               *
    * - Set the range for searching closed-loop pitch in 1st subframe      *
    * - Find the open-loop pitch delay for last 2 subframes                *
    *----------------------------------------------------------------------*/

#ifdef VAD2
   if (st->dtx)
   {  /* no test() call since this if is only in simulation env */
       st->vadSt->L_Rmax = 0;			move32 ();
       st->vadSt->L_R0 = 0;			move32 ();
   }
#endif
   for(subfrNr = 0, i_subfr = 0; 
       subfrNr < L_FRAME/L_FRAME_BY2; 
       subfrNr++, i_subfr += L_FRAME_BY2)
   {
      /* Pre-processing on 80 samples */
      pre_big(mode, gamma1, gamma1_12k2, gamma2, A_t, i_subfr, st->speech,
              st->mem_w, st->wsp);
    
      test (); test ();
      if ((sub(mode, MR475) != 0) && (sub(mode, MR515) != 0))
      {
         /* Find open loop pitch lag for two subframes */
         ol_ltp(st->pitchOLWghtSt, st->vadSt, mode, &st->wsp[i_subfr],
                &T_op[subfrNr], st->old_lags, st->ol_gain_flg, subfrNr,
                st->dtx);
      }
   }
   fwc ();                 /* function worst case */

   test (); test();
   if ((sub(mode, MR475) == 0) || (sub(mode, MR515) == 0))
   {
      /* Find open loop pitch lag for ONE FRAME ONLY */
      /* search on 160 samples */
      
      ol_ltp(st->pitchOLWghtSt, st->vadSt, mode, &st->wsp[0], &T_op[0],
             st->old_lags, st->ol_gain_flg, 1, st->dtx);
      T_op[1] = T_op[0];                                     move16 ();
   }         
   fwc ();                 /* function worst case */
   
#ifdef VAD2
   if (st->dtx)
   {  /* no test() call since this if is only in simulation env */
      LTP_flag_update(st->vadSt, mode);
   }
#endif

#ifndef VAD2
   /* run VAD pitch detection */
   if (st->dtx)
   {  /* no test() call since this if is only in simulation env */
      vad_pitch_detection(st->vadSt, T_op);
   } 
#endif
   fwc ();                 /* function worst case */

   if (sub(*usedMode, MRDTX) == 0)
   {
      goto the_end;
   }
   
   /*------------------------------------------------------------------------*
    *          Loop for every subframe in the analysis frame                 *
    *------------------------------------------------------------------------*
    *  To find the pitch and innovation parameters. The subframe size is     *
    *  L_SUBFR and the loop is repeated L_FRAME/L_SUBFR times.               *
    *     - find the weighted LPC coefficients                               *
    *     - find the LPC residual signal res[]                               *
    *     - compute the target signal for pitch search                       *
    *     - compute impulse response of weighted synthesis filter (h1[])     *
    *     - find the closed-loop pitch parameters                            *
    *     - encode the pitch delay                                           *
    *     - update the impulse response h1[] by including fixed-gain pitch   *
    *     - find target vector for codebook search                           *
    *     - codebook search                                                  *
    *     - encode codebook address                                          *
    *     - VQ of pitch and codebook gains                                   *
    *     - find synthesis speech                                            *
    *     - update states of weighting filter                                *
    *------------------------------------------------------------------------*/

   A = A_t;      /* pointer to interpolated LPC parameters */
   Aq = Aq_t;    /* pointer to interpolated quantized LPC parameters */

   evenSubfr = 0;                                                  move16 ();
   subfrNr = -1;                                                   move16 ();
   for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
   {
      subfrNr = add(subfrNr, 1);
      evenSubfr = sub(1, evenSubfr);

      /* Save states for the MR475 mode */
      test(); test();
      if ((evenSubfr != 0) && (sub(*usedMode, MR475) == 0))
      {
         Copy(st->mem_syn, mem_syn_save, M);
         Copy(st->mem_w0, mem_w0_save, M);         
         Copy(st->mem_err, mem_err_save, M);         
         sharp_save = st->sharp;
      }
      
      /*-----------------------------------------------------------------*
       * - Preprocessing of subframe                                     *
       *-----------------------------------------------------------------*/
      test();
      if (sub(*usedMode, MR475) != 0)
      {
         subframePreProc(*usedMode, gamma1, gamma1_12k2,
                         gamma2, A, Aq, &st->speech[i_subfr],
                         st->mem_err, st->mem_w0, st->zero,
                         st->ai_zero, &st->exc[i_subfr],
                         st->h1, xn, res, st->error);
      }
      else
      { /* MR475 */
         subframePreProc(*usedMode, gamma1, gamma1_12k2, 
                         gamma2, A, Aq, &st->speech[i_subfr],
                         st->mem_err, mem_w0_save, st->zero,
                         st->ai_zero, &st->exc[i_subfr],
                         st->h1, xn, res, st->error);

         /* save impulse response (modified in cbsearch) */
         test ();
         if (evenSubfr != 0)
         {
             Copy (st->h1, h1_sf0, L_SUBFR);
         }
      }
      
      /* copy the LP residual (res2 is modified in the CL LTP search)    */
      Copy (res, res2, L_SUBFR);

      fwc ();                 /* function worst case */
    
      /*-----------------------------------------------------------------*
       * - Closed-loop LTP search                                        *
       *-----------------------------------------------------------------*/
      cl_ltp(st->clLtpSt, st->tonStabSt, *usedMode, i_subfr, T_op, st->h1, 
             &st->exc[i_subfr], res2, xn, lsp_flag, xn2, y1, 
             &T0, &T0_frac, &gain_pit, gCoeff, &ana,
             &gp_limit);

      /* update LTP lag history */
      move16 (); test(); test ();
      if ((subfrNr == 0) && (st->ol_gain_flg[0] > 0))
      {
         st->old_lags[1] = T0;     move16 ();
      }
      
      move16 (); test(); test ();
      if ((sub(subfrNr, 3) == 0) && (st->ol_gain_flg[1] > 0))
      {
         st->old_lags[0] = T0;     move16 ();
      }      

      fwc ();                 /* function worst case */
      
      /*-----------------------------------------------------------------*
       * - Innovative codebook search (find index and gain)              *
       *-----------------------------------------------------------------*/
      cbsearch(xn2, st->h1, T0, st->sharp, gain_pit, res2, 
               code, y2, &ana, *usedMode, subfrNr);
      
      fwc ();                 /* function worst case */
    
      /*------------------------------------------------------*
       * - Quantization of gains.                             *
       *------------------------------------------------------*/
      gainQuant(st->gainQuantSt, *usedMode, res, &st->exc[i_subfr], code,
                xn, xn2,  y1, y2, gCoeff, evenSubfr, gp_limit,
                &gain_pit_sf0, &gain_code_sf0,
                &gain_pit, &gain_code, &ana);
      
      fwc ();                 /* function worst case */

      /* update gain history */
      update_gp_clipping(st->tonStabSt, gain_pit);
      
      test(); 
      if (sub(*usedMode, MR475) != 0)
      {
         /* Subframe Post Processing */
         subframePostProc(st->speech, *usedMode, i_subfr, gain_pit,
                          gain_code, Aq, synth, xn, code, y1, y2, st->mem_syn,
                          st->mem_err, st->mem_w0, st->exc, &st->sharp);
      }
      else
      {
         test();
         if (evenSubfr != 0)
         {
            i_subfr_sf0 = i_subfr;             move16 ();
            Copy(xn, xn_sf0, L_SUBFR);
            Copy(y2, y2_sf0, L_SUBFR);          
            Copy(code, code_sf0, L_SUBFR);
            T0_sf0 = T0;                       move16 ();
            T0_frac_sf0 = T0_frac;             move16 ();
            
            /* Subframe Post Processing */
            subframePostProc(st->speech, *usedMode, i_subfr, gain_pit,
                             gain_code, Aq, synth, xn, code, y1, y2,
                             mem_syn_save, st->mem_err, mem_w0_save,
                             st->exc, &st->sharp);
            st->sharp = sharp_save;                         move16();
         }
         else
         {
            /* update both subframes for the MR475 */
            
            /* Restore states for the MR475 mode */
            Copy(mem_err_save, st->mem_err, M);         
            
            /* re-build excitation for sf 0 */
            Pred_lt_3or6(&st->exc[i_subfr_sf0], T0_sf0, T0_frac_sf0,
                         L_SUBFR, 1);
            Convolve(&st->exc[i_subfr_sf0], h1_sf0, y1, L_SUBFR);
            
            Aq -= MP1;
            subframePostProc(st->speech, *usedMode, i_subfr_sf0,
                             gain_pit_sf0, gain_code_sf0, Aq,
                             synth, xn_sf0, code_sf0, y1, y2_sf0,
                             st->mem_syn, st->mem_err, st->mem_w0, st->exc,
                             &sharp_save); /* overwrites sharp_save */
            Aq += MP1;
            
            /* re-run pre-processing to get xn right (needed by postproc) */
            /* (this also reconstructs the unsharpened h1 for sf 1)       */
            subframePreProc(*usedMode, gamma1, gamma1_12k2,
                            gamma2, A, Aq, &st->speech[i_subfr],
                            st->mem_err, st->mem_w0, st->zero,
                            st->ai_zero, &st->exc[i_subfr],
                            st->h1, xn, res, st->error);
            
            /* re-build excitation sf 1 (changed if lag < L_SUBFR) */
            Pred_lt_3or6(&st->exc[i_subfr], T0, T0_frac, L_SUBFR, 1);
            Convolve(&st->exc[i_subfr], st->h1, y1, L_SUBFR);
            
            subframePostProc(st->speech, *usedMode, i_subfr, gain_pit,
                             gain_code, Aq, synth, xn, code, y1, y2,
                             st->mem_syn, st->mem_err, st->mem_w0,
                             st->exc, &st->sharp);
         }
      }      
               
      fwc ();                 /* function worst case */
          
      A += MP1;    /* interpolated LPC parameters for next subframe */
      Aq += MP1;
   }

   Copy(&st->old_exc[L_FRAME], &st->old_exc[0], PIT_MAX + L_INTERPOL);
   
the_end:
   
   /*--------------------------------------------------*
    * Update signal for next frame.                    *
    *--------------------------------------------------*/
   Copy(&st->old_wsp[L_FRAME], &st->old_wsp[0], PIT_MAX);
   
   Copy(&st->old_speech[L_FRAME], &st->old_speech[0], L_TOTAL - L_FRAME);

   fwc ();                 /* function worst case */
       
   return 0;
}
double LsSol::MinimumEigenValueVector_InvPower(
		LsSol::CLinearSystem& ls , 
		LsSol::CPreconditioner& pls, 
		const std::vector<unsigned int>& aIdVec, 
		////////////////
		unsigned int itr_invp,		// maximum number of inverse power method iterations
		unsigned int itr_lssol,		// maximum number of ICCG iterations
		double conv_ratio_lssol,	// relative residual convergence criterion for ICCG
		int& iflag_conv )	// 0: finished normally, 1: ICCG did not converge
{
	const unsigned int max_itr_invp = itr_invp;
	if( ls.GetTmpVectorArySize() < 3 ){ ls.ReSizeTmpVecSolver(3); }

	const int ires = -1;
	const int iupd = -2;
	const int itmp = 2;

	// Gram-Schmidt orthogonalization of the vectors to be ignored (deflated)
	for(unsigned int iivec=0;iivec<aIdVec.size();iivec++){
		const unsigned int ivec0=aIdVec[iivec];
		for(unsigned int jivec=0;jivec<iivec;jivec++){
			const unsigned int jvec0=aIdVec[jivec];
			const double dot = ls.DOT(ivec0,jvec0);
			ls.AXPY(-dot,jvec0,ivec0);
		}
		const double sq_normx = ls.DOT(ivec0,ivec0);
		const double normx = sqrt(sq_normx);
		ls.SCAL(1.0/normx,ivec0);
	}

	// Subtract the ignored-vector components from the update vector
	for(unsigned int iivec=0;iivec<aIdVec.size();iivec++){
		const unsigned int ivec0=aIdVec[iivec];
		const double dot0 = ls.DOT(ivec0,iupd);
		ls.AXPY(-dot0,ivec0,iupd);
	}
	{	// Normalize the update vector
		const double sq_normx = ls.DOT(iupd,iupd);
		const double normx = sqrt(sq_normx);
		ls.SCAL(1.0/normx,iupd);
	}

    double min_eigen;
	for(itr_invp=0;itr_invp<max_itr_invp;itr_invp++)
	{
		ls.COPY(iupd,ires);
		ls.COPY(iupd,itmp);
		{	// Apply the inverse of the matrix (solve the linear system)
			double conv_ratio = conv_ratio_lssol;
			unsigned int num_iter = itr_lssol;
            LsSol::CLinearSystemPreconditioner lsp(ls,pls);
            bool res = LsSol::Solve_PCG(conv_ratio, num_iter, lsp);
//			std::cout << "PCG iter : " << num_iter << " " << conv_ratio << std::endl;
			if( !res ){ 
				iflag_conv = 1;	// flag: ICCG did not converge
				return 0; 
			}
		}
		// Subtract the ignored-vector components
		for(unsigned int iivec=0;iivec<aIdVec.size();iivec++){
			const unsigned int ivec0=aIdVec[iivec];
			const double dot0 = ls.DOT(ivec0,iupd);
			ls.AXPY(-dot0,ivec0,iupd);
		}
		// Compute the Rayleigh quotient
		min_eigen = ls.DOT(itmp,iupd);
		// Normalize the update vector
		const double norm0 = sqrt(ls.DOT(iupd,iupd));			
		ls.SCAL(1.0/norm0,iupd);
	}
	iflag_conv = 0;	// flag: finished normally
	return  1.0 / min_eigen;
}
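
For reference, each pass of the loop above is one step of a deflated inverse power iteration; as a sketch in LaTeX notation (A is the system matrix, v_i the vectors listed in aIdVec, x_k the normalized update vector):

y_{k+1} = A^{-1} x_k, \qquad
\tilde{y}_{k+1} = y_{k+1} - \sum_i \left( v_i^{\top} y_{k+1} \right) v_i, \qquad
x_{k+1} = \tilde{y}_{k+1} / \lVert \tilde{y}_{k+1} \rVert, \qquad
\lambda_{\min} \approx 1 / \left( x_k^{\top} \tilde{y}_{k+1} \right)

The inner Solve_PCG call approximates the A^{-1} x_k step, min_eigen accumulates the Rayleigh quotient x_k^T A^{-1} x_k, and the function returns its reciprocal as the estimated smallest eigenvalue.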