int retrieveData(std::string filename, std::stringstream &ss) { std::ifstream file(filename.c_str(), std::ios::binary); int type = file.peek(); if (type == 0x0A) { ss.flush(); ss << file; return 0; } if (type == 0x1F) { int ret = ungzip(file, ss); return ret; } std::cerr << "[DEBUG] Schematic compression type not recognize : " << type << std::endl; return 1; }
bool QDictWidget::ensureDictFile() { if(dictFile.exists()) { return true; } QDir baseDir("/media/card"); if(!baseDir.exists()) { baseDir = QDir::home(); } QDir dictDir(baseDir.path() + "/.qgcide"); if(!dictDir.exists()) { if(!baseDir.mkdir(".qgcide")) { showErr(tr("Unable to create dictionary dir ") + dictDir.absolutePath()); return false; } } dictFile.setFileName(dictDir.absolutePath() + "/gcide-entries.xml"); if(dictFile.exists()) { return true; } if(QMessageBox::question(this, tr("English dictionary"), tr("Dictionary must be downloaded. Please make sure you have internet connection and press yes to confirm download (14MB)."), QMessageBox::Yes, QMessageBox::No) == QMessageBox::No) { return false; } progress->setVisible(true); QString gzFile = dictFile.fileName() + ".gz"; if(!download("http://dl.linuxphone.ru/openmoko/qtmoko/packages/gcide-entries.xml.gz", gzFile, "gcide-entries.xml.gz")) { return false; } if(!ungzip(gzFile)) { return false; } progress->setVisible(false); return true; }
/**
 * Read a schematic file into `ss`, decompressing gzip input on the fly.
 *
 * The first byte acts as a format tag: 0x0A means plain data (copied
 * byte-for-byte), 0x1F is the start of the gzip magic number and is handed
 * to ungzip(). Anything else is reported on stderr.
 *
 * @param filename file to open (binary mode)
 * @param ss       destination stream
 * @return 0 on success, ungzip()'s status for gzip data, 1 for an
 *         unrecognized format byte
 */
int retrieveData(std::string filename, std::stringstream &ss) {
    std::ifstream input(filename.c_str(), std::ios::binary);
    const int magic = input.peek();
    switch (magic) {
    case 0x0A:
        // Plain data: copy the whole stream buffer into the output.
        ss.flush();
        std::copy(std::istreambuf_iterator<char>(input),
                  std::istreambuf_iterator<char>(),
                  std::ostreambuf_iterator<char>(ss));
        return 0;
    case 0x1F:
        // gzip magic byte — delegate decompression.
        return ungzip(input, ss);
    default:
        std::cerr << "[ERROR] Schematic compression type not recognize : " << magic << std::endl;
        return 1;
    }
}
/**
 * Parse one HTTP response from the connection into `R`, byte by byte.
 *
 * Reads the status line (version / status code / reason phrase), then the
 * header key:value pairs, then the body — either via Content-Length or
 * chunked Transfer-Encoding — and transparently gunzips the body when
 * Content-Encoding is gzip.
 *
 * @param R out-parameter; its version/state/comment/heads/data/size fields
 *          are filled in. R->data ownership passes to the caller.
 * @return true on success (always true when it returns at all).
 * @throws const char* on any protocol violation or on a non-200 status.
 */
bool CMyHttp::parseResponse(CMyHttpResonse* R)
{
    const char* kHttpError = "HTTP报文解析遇到错误, 请向作者反馈信息!";
    char c;
    // Read the HTTP version (everything up to the first space)
    while(recv(&c,1) && c!=' '){ R->version += c; }
    // Read the status code
    while(recv(&c,1) && c!= ' '){ R->state += c; }
    // Read the reason phrase
    while(recv(&c,1) && c!='\r'){ R->comment += c; }
    // \r is consumed; a \n is still pending
    recv(&c,1);
    if(c!='\n') throw kHttpError;
    // Read the header key/value pairs
    for(;;){
        std::string k,v;
        // read the key (an immediate \r means the blank line ending the headers)
        recv(&c,1);
        if(c=='\r'){
            recv(&c,1);
            assert(c=='\n');
            if(c!='\n') throw kHttpError;
            break;
        }
        else k += c;
        while(recv(&c,1) && c!=':'){ k += c; }
        // read the value (expects exactly one space after the colon)
        recv(&c,1);
        assert(c==' ');
        if(c!=' ') throw kHttpError;
        while(recv(&c,1) && c!='\r'){ v += c; }
        // skip the \n
        recv(&c,1);
        assert(c=='\n');
        if(c!='\n') throw kHttpError;
        // store the pair
        R->heads[k] = v;
    }
    // If the response carries data, it starts here
    if(R->state != "200"){
        // NOTE(review): throwing t.c_str() from a function-local static is
        // not reentrant/thread-safe — the "Oops!!!" marks a known hack.
        static std::string t; //Oops!!!
        t = R->version;
        t += " ";
        t += R->state;
        t += " ";
        t += R->comment;
        throw t.c_str();
    }
    bool bContentLength,bTransferEncoding;
    bContentLength = R->heads.count("Content-Length")!=0;
    bTransferEncoding = R->heads.count("Transfer-Encoding")!=0;
    if(bContentLength){
        int len = atoi(R->heads["Content-Length"].c_str());
        char* data = new char[len+1]; // one extra byte so the buffer is NUL-terminated; the reported length stays the real one
        memset(data,0,len+1); // this line is optional
        recv(data,len);
        bool bGzipped = R->heads["Content-Encoding"] == "gzip";
        if(bGzipped){
            CMyByteArray gzip;
            ungzip(data, len, &gzip);
            gzip.Duplicate(&R->data, true);
            R->size = gzip.GetSize();
            // NOTE(review): `data` is never freed on this path — looks like a
            // leak; confirm CMyByteArray/R->data ownership semantics.
            return true;
        }
        R->data = data;
        R->size = len; // without the extra byte
        return true;
    }
    if(bTransferEncoding){
        // Reference: http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.6.1
        if(R->heads["Transfer-Encoding"] != "chunked")
            throw "未处理的传送编码类型!";
        // hex digit -> value (returns 0 for any non-hex character)
        auto char_val = [](char ch) {
            int val = 0;
            if(ch>='0' && ch<='9') val = ch-'0'+ 0;
            if(ch>='a' && ch<='f') val = ch-'a'+10;
            if(ch>='A' && ch<='F') val = ch-'A'+10;
            return val;
        };
        int len;
        CMyByteArray bytes;
        for(;;){
            len = 0;
            // read the chunk-size line (hex), stopping at space or \r
            while(recv(&c,1) && c!=' ' && c!='\r'){
                len = len*16 + char_val(c);
            }
            // drain the rest of the chunk-size line (e.g. chunk extensions)
            if(c!='\r'){
                while(recv(&c,1) && c!='\r')
                    ;
            }
            recv(&c,1);
            assert(c=='\n');
            if(c!='\n') throw kHttpError;
            // a zero chunk size ends the chunked data
            if(len == 0){
                break;
            }
            // read the chunk payload plus its trailing \r\n
            char* p = new char[len+2];
            recv(p,len+2);
            assert(p[len+0] == '\r');
            assert(p[len+1] == '\n');
            if(p[len+0]!='\r') throw kHttpError;
            if(p[len+1]!='\n') throw kHttpError;
            bytes.Write(p,len);
            delete[] p;
        }
        // end of chunked data: consume the final \r\n
        recv(&c,1);
        assert(c == '\r');
        if(c!='\r') throw kHttpError;
        recv(&c,1);
        assert(c == '\n');
        if(c!='\n') throw kHttpError;
        bool bGzipped = R->heads["Content-Encoding"] == "gzip";
        if(bGzipped){
            CMyByteArray gzip;
            if(!ungzip(bytes.GetData(), bytes.GetSize(), &gzip))
                throw "HTTP报文Gzip解压时遇到错误! 请反馈!";
            gzip.Duplicate(&R->data, true);
            R->size = gzip.GetSize();
            return true;
        }
        bytes.Duplicate(&R->data, true);
        R->size = bytes.GetSize();
        return true;
    }
    // Reaching here means there is no message body
    R->data = 0;
    R->size = 0;
    return true;
}
/*
 * Read one complete HTTP response from `con` and return it in `*rp`.
 *
 * Reads in BUFSIZE slices until the headers ("\r\n\r\n") arrive, then sizes
 * the body from Content-Length, or incrementally decodes a chunked
 * Transfer-Encoding body, honoring Connection: close. A gzip
 * Content-Encoding body is inflated in place at the end.
 *
 * Returns 0 on success (caller owns *rp), -1 on error.
 */
gint rcv_response(Connection *con, Response **rp)
{
    if(con == NULL || rp == NULL){
        return -1;
    }
    Response *r = NULL;
    gsize need_to_read = G_MAXSIZE; //we don't know the length of
                                    //the data that need to read.
    //store the data that has read
    GString *data = g_string_new(NULL);
    gchar *tev = NULL;         //the transfer encoding
    gint cl = -1;              //the content length
    gboolean gotcl = FALSE;    //got content lenght or not
                               //so, we should do sth
    gboolean ischunked = FALSE;
    gint chunkbegin = 0;       //offset in `data` of the next chunk-size line
    gint chunklen = -1;        //the chunk length
    gint totalchunklen = 0;
    gboolean conclose = FALSE; //connection close
    gboolean gotallheaders = FALSE;
    gboolean isgzip = FALSE;
#define BUFSIZE 500
    gchar buf[BUFSIZE];
    GIOStatus status;
    GError *err = NULL;
    gsize bytes_read = 0;
    gsize want_read = 0;
    //g_debug("Begin to read data.(%s, %d)", __FILE__, __LINE__);
    while(need_to_read > 0){
        //never ask for more than one buffer or more than we still need
        want_read = BUFSIZE < need_to_read ? BUFSIZE : need_to_read;
        status = g_io_channel_read_chars(con -> channel, buf
                                         , want_read, &bytes_read, &err);
        switch(status) {
        case G_IO_STATUS_NORMAL:
            //read success.
            need_to_read -= bytes_read;
            break;
        case G_IO_STATUS_EOF:
            if(conclose){
                /*
                 * The Connection header is close.
                 * The server will close the conntion after
                 * send all the data.
                 */
                //we got all the data.
                g_debug("Server close the connection. "
                        "data has gotten: %u(%s, %d)"
                        , (unsigned int)bytes_read
                        ,__FILE__, __LINE__);
                break;
            }
            break;
        case G_IO_STATUS_ERROR:
            /* NOTE(review): this literal appears to span a raw line break in
             * the original source — verify it compiles as intended. */
            g_warning("Read data ERROR!! 
code:%d msg:%s(%s, %d)"
                      , err -> code, err -> message
                      , __FILE__, __LINE__);
            g_string_free(data, TRUE);
            g_error_free(err);
            return -1;
        case G_IO_STATUS_AGAIN:
            g_warning("Channel temporarily unavailable.");
            break;
        default:
            g_warning("Unknown io status!(%s, %d)", __FILE__, __LINE__);
            g_string_free(data, TRUE);
            return -1;
        }
        g_string_append_len(data, buf, bytes_read);
        if(!gotallheaders && g_strstr_len(data -> str, data -> len
                                          , "\r\n\r\n") != NULL){
            //We have gotten all the headers;
            r = response_new_parse(data);
            g_string_truncate(data, 0);
            gotallheaders = TRUE;
            //Find the Content-Length 's value
            gchar *clen = response_get_header_chars(r, "Content-Length");
            if(clen != NULL){
                gotcl = TRUE;
                //calculate the message we have not read.
                cl = atoi(clen);
                g_debug("Content-Length: %d.(%s, %d)"
                        , cl, __FILE__, __LINE__);
                need_to_read = cl - r -> msg -> len;
                //g_debug("Message need to read %d bytes."
                //        "(%s, %d)", need_to_read
                //        , __FILE__, __LINE__);
            }
            //Find the Transfering-Encoding 's value
            tev = response_get_header_chars(r, "Transfer-Encoding");
            if(tev != NULL && g_strstr_len(tev, -1, "chunked") != NULL){
                g_debug("The message body is chunked.(%s, %d)"
                        , __FILE__, __LINE__);
                ischunked = TRUE;
                //copy the message back to data, so the chunk decoder below
                //sees the raw bytes and rebuilds r->msg itself
                g_string_truncate(data , 0);
                g_string_append_len(data, r -> msg -> str, r -> msg -> len);
                g_string_truncate(r -> msg, 0);
                chunklen = -1;
                chunkbegin = 0;
                /*
                 * We will read the data according to the
                 * chunked
                 */
                need_to_read = G_MAXSIZE;
            }
            gchar *connection = response_get_header_chars(r
                                                          , "Connection");
            if(connection != NULL){
                //g_debug("Connection: %s (%s, %d)", connection
                //        , __FILE__, __LINE__);
                if(g_strstr_len(connection, -1, "close") != NULL){
                    conclose = TRUE;
                }
            }
            gchar *ce = response_get_header_chars(r
                                                  , "Content-Encoding");
            if(ce != NULL){
                g_debug("Content-Encoding: %s (%s, %d)", ce
                        , __FILE__, __LINE__);
                if(g_strstr_len(ce, -1, "gzip") != NULL){
                    isgzip = TRUE;
                }
            }
        }
        if(gotallheaders && ischunked){
            //decode as many complete chunks as `data` currently holds
            while(TRUE){
                if(chunkbegin + 2 > data -> len){
                    break;
                }
                gchar *tmp = g_strstr_len(data -> str + chunkbegin
                                          , data -> len - chunkbegin, "\r\n");
                if(tmp != NULL){
                    /*
                     * we got the length
                     */
                    //temporarily NUL-terminate the size line for strtol
                    *tmp = '\0';
                    gchar *end = NULL;
                    chunklen = strtol(data -> str + chunkbegin, &end, 16);
                    *tmp = '\r';
                    if(end != data -> str + chunkbegin && chunklen == 0){
                        // Get all data
                        /* NOTE(review): need_to_read is gsize (unsigned);
                         * -1 wraps to G_MAXSIZE — the loop actually exits
                         * via the bytes_read < want_read check. Confirm. */
                        need_to_read = -1;
                        break;
                    }
                }else{
                    /* Format data->len so that it can be compiled on x86-64 machine */
                    g_debug("More chunks... Begin %d len %d %s(%s, %d)"
                            , chunkbegin, (int)data -> len
                            , data -> str + chunkbegin
                            , __FILE__, __LINE__);
                    break;
                }
                //append the chunk payload only when it is fully buffered
                if(chunklen != -1 &&
                   (tmp - data -> str) + 2 + chunklen <= data -> len){
                    totalchunklen += chunklen;
                    chunkbegin = tmp - data -> str;
                    chunkbegin += 2; //skip the \r\n after the size line
                    g_string_append_len(r -> msg, data -> str + chunkbegin
                                        , chunklen);
                    chunkbegin += chunklen + 2; //skip payload and its \r\n
                    chunklen = -1;
                }else{
                    break;
                }
            }
        }
        //a short read means the channel has no more data right now
        if(bytes_read < want_read){
            break;
        }
    }//end of while(need_to_read > 0)...
    //g_debug("Read all data.(%s, %d)", __FILE__, __LINE__);
    if(r == NULL){
        //we do not find "\r\n\r\n".
        //Should not happen.
        g_warning("Read all data, but not find all headers.!"
                  "(%s, %d)", __FILE__, __LINE__);
        g_string_free(data, TRUE);
        return -1;
    }
    if(!ischunked){
        //copy the message to r -> msg;
        g_string_append_len(r -> msg, data -> str, data -> len);
    }else{
        //g_debug("Total chunk length: %d (%s, %d)", totalchunklen
        //        , __FILE__, __LINE__);
    }
#undef BUFSIZE
    if(gotcl && r -> msg -> len != cl && tev == NULL){
        g_warning("No read all the message!! content length:%d"
                  " msg -> len: %u. (%s, %d)"
                  , cl, (unsigned int)r -> msg -> len, __FILE__, __LINE__);
    }
    if(isgzip){
        /*
         * ungzip the data
         */
        GString *out = g_string_new(NULL);
        ungzip(r -> msg, out);
        g_string_truncate(r -> msg, 0);
        /* NOTE(review): g_string_append stops at the first NUL byte — if the
         * inflated body can be binary, g_string_append_len with out->len
         * would be safer. Confirm the expected content type. */
        g_string_append(r -> msg, out -> str);
        g_string_free(out, TRUE);
        g_debug("Ungzip data. After len %u.(%s, %d)"
                , (unsigned int)r -> msg -> len, __FILE__, __LINE__);
    }
    *rp = r;
    g_string_free(data, TRUE);
    return 0;
}
/**
 * Lua C function: parse a (possibly gzipped) JSON field out of the active
 * Heka message and return it as an rjson userdata on the Lua stack.
 *
 * For input sandboxes ('i') the message comes from a stream-reader userdata
 * at argument 1; otherwise the sandbox's current message is used. The JSON
 * bytes are located via read_message(), inflated if they start with the gzip
 * magic (0x1f 0x8b), and parsed with rapidjson (in-situ for the inflated
 * copy, via MemoryStream otherwise).
 *
 * Returns 1 (the userdata) on success; raises a Lua error on any failure.
 */
static int rjson_parse_message(lua_State *lua)
{
  lua_getfield(lua, LUA_REGISTRYINDEX, LSB_HEKA_THIS_PTR);
  lsb_heka_sandbox *hsb = static_cast<lsb_heka_sandbox *>(lua_touserdata(lua, -1));
  lua_pop(lua, 1); // remove this ptr
  if (!hsb) {
    return luaL_error(lua, "parse_message() invalid " LSB_HEKA_THIS_PTR);
  }

  int n = lua_gettop(lua);
  int idx = 1; // stack index where read_message()'s arguments start
  const lsb_heka_message *msg = NULL;
  if (lsb_heka_get_type(hsb) == 'i') {
    // input sandbox: argument 1 is the stream reader carrying the message
    luaL_argcheck(lua, n >= 2 && n <= 4, 0, "invalid number of arguments");
    heka_stream_reader *hsr = static_cast<heka_stream_reader *>
        (luaL_checkudata(lua, 1, LSB_HEKA_STREAM_READER));
    msg = &hsr->msg;
    idx = 2;
  } else {
    luaL_argcheck(lua, n >= 1 && n <= 3, 0, "invalid number of arguments");
    const lsb_heka_message *hm = lsb_heka_get_message(hsb);
    if (!hm || !hm->raw.s) {
      return luaL_error(lua, "parse_message() no active message");
    }
    msg = hm;
  }

  lsb_const_string json = read_message(lua, idx, msg);
  if (!json.s) return luaL_error(lua, "field not found");

  char *inflated = NULL;
#ifdef HAVE_ZLIB
  // automatically handle gzipped strings (optimization for Mozilla telemetry
  // messages)
  if (json.len > 2) {
    // gzip magic number: 0x1f 0x8b
    if (json.s[0] == 0x1f && (unsigned char)json.s[1] == 0x8b) {
      // upvalue 1 holds the max message size used as the inflate cap
      size_t mms = (size_t)lua_tointeger(lua, lua_upvalueindex(1));
      inflated = ungzip(json.s, json.len, mms, NULL);
      if (!inflated) return luaL_error(lua, "ungzip failed");
    }
  }
#endif

  rjson *j = static_cast<rjson *>(lua_newuserdata(lua, sizeof*j));
  j->doc = new rj::Document;
  j->val = NULL;
  j->insitu = inflated; // userdata takes ownership of the inflated buffer
  j->refs = new std::set<rj::Value *>;
  // set the metatable before any error path so __gc can clean up
  luaL_getmetatable(lua, mozsvc_rjson);
  lua_setmetatable(lua, -2);
  if (!j->doc || !j->refs) {
    // NOTE(review): plain `new` throws rather than returning NULL, so this
    // guard is likely dead unless a nothrow allocator is configured — confirm.
    lua_pushstring(lua, "memory allocation failed");
    return lua_error(lua);
  }

  bool err = false;
  if (j->insitu) {
    // in-situ parse mutates the inflated buffer; stop at the first document
    if (j->doc->ParseInsitu<rj::kParseStopWhenDoneFlag>(j->insitu)
        .HasParseError()) {
      err = true;
      lua_pushfstring(lua, "failed to parse offset:%f %s",
                      (lua_Number)j->doc->GetErrorOffset(),
                      rj::GetParseError_En(j->doc->GetParseError()));
    }
  } else {
    // parse directly from the message bytes without copying
    rj::MemoryStream
        ms(json.s, json.len);
    if (j->doc->ParseStream<0, rj::UTF8<> >(ms).HasParseError()) {
      err = true;
      lua_pushfstring(lua, "failed to parse offset:%f %s",
                      (lua_Number)j->doc->GetErrorOffset(),
                      rj::GetParseError_En(j->doc->GetParseError()));
    }
  }
  if (err) return lua_error(lua);
  j->refs->insert(j->doc);
  return 1;
}