Code example #1
0
File: http.cpp Project: githubber/bibledit
// Takes data POSTed from the browser, and parses it.
// The parsed key/value pairs are stored in request->post.
// Any parse failure is silently ignored: a bad POST body simply yields fewer fields.
void http_parse_post (string content, Webserver_Request * request)
{
  // Read and parse the POST data.
  try {
    if (!content.empty ()) {
      // Standard parse.
      // Values in a url-encoded body still need percent-decoding after the generic parse.
      bool urlencoded = request->content_type.find ("urlencoded") != string::npos;
      ParseWebData::WebDataMap dataMap;
      ParseWebData::parse_post_data (content, request->content_type, dataMap);
      for (ParseWebData::WebDataMap::const_iterator iter = dataMap.begin(); iter != dataMap.end(); ++iter) {
        string value;
        if (urlencoded) value = filter_url_urldecode ((*iter).second.value);
        else value = (*iter).second.value;
        request->post [(*iter).first] = value;
      }
      // Special case: Extract the filename in case of a file upload.
      // Only the first kilobyte is scanned: in a multipart body the part headers
      // come first, so this avoids walking through a potentially huge file payload.
      if (content.length () > 1000) content.resize (1000);
      if (content.find ("filename=") != string::npos) {
        vector <string> lines = filter_string_explode (content, '\n');
        for (auto & line : lines) {
          if (line.find ("Content-Disposition") == string::npos) continue;
          size_t pos = line.find ("filename=");
          if (pos == string::npos) continue;
          // Skip past filename=" (10 characters) to the start of the name itself.
          line = line.substr (pos + 10);
          line = filter_string_trim (line);
          // Drop the closing quote. Guard against a malformed header that leaves
          // the line empty: pop_back() on an empty string is undefined behavior.
          if (!line.empty ()) line.pop_back ();
          request->post ["filename"] = line;
        }
      }
    }
  } catch (...) {
  }
}
Code example #2
0
File: listing.cpp Project: bibledit/bibledit-windows
string index_listing (void * webserver_request, string url)
{
    string page = Assets_Page::header ("Bibledit", webserver_request);
    // No breadcrumbs because the user can arrive here from more than one place.
    Assets_View view;
    // Normalize the requested location into a forward-slash web path.
    url = filter_url_urldecode (url);
    url = filter_url_create_path ("", url);
    url = filter_string_str_replace ("\\", "/", url);
    view.set_variable ("url", url);
    string parent = filter_url_dirname_web (url);
    if (parent.length () > 1) {
        view.enable_zone ("parent");
        view.set_variable ("parent", parent);
    }
    string directory = filter_url_create_root_path (url);
    // An existing regular file: hand its raw contents straight back.
    if (file_or_dir_exists (directory) && !filter_url_is_dir (directory)) {
        string filename = filter_url_create_root_path (url);
        return filter_url_file_get_contents (filename);
    }
    // A folder (or a non-existent path, which scans as empty):
    // render one table row per directory entry.
    vector <string> entries = filter_url_scandir (directory);
    for (auto & entry : entries) {
        string path = filter_url_create_path (directory, entry);
        string row;
        row.append ("<tr>");
        row.append ("<td>");
        row.append ("<a href=\"" + filter_url_create_path (url, entry) + "\">");
        row.append (entry);
        row.append ("</a>");
        row.append ("</td>");
        row.append ("<td>");
        // Folders get an empty size cell; files show their byte count.
        if (!filter_url_is_dir (path)) {
            row.append (convert_to_string (filter_url_filesize (path)));
        }
        row.append ("</td>");
        row.append ("</tr>");
        entry = row;
    }
    string listing = filter_string_implode (entries, "\n");
    if (listing.empty ()) listing = translate ("No files in this folder");
    else {
        listing.insert (0, "<table>");
        listing.append ("</table>");
    }
    view.set_variable ("listing", listing);
    page += view.render ("index", "listing");
    page += Assets_Page::footer ();
    return page;
}
Code example #3
0
File: http.cpp Project: githubber/bibledit
// This function serves a file and enables caching by the browser.
void http_serve_cache_file (Webserver_Request * request)
{
  // Resolve the requested URL to an absolute path on disk.
  const string filename = filter_url_create_root_path (filter_url_urldecode (request->get));

  // Use the file size, quoted, as the entity tag for browser caching.
  int filesize = filter_url_filesize (filename);
  request->etag = "\"" + convert_to_string (filesize) + "\"";

  // When the copy in the browser's cache is still current,
  // answer 304 Not Modified and send no body.
  // https://developers.google.com/web/fundamentals/performance/optimizing-content-efficiency/http-caching
  if (request->if_none_match == request->etag) {
    request->response_code = 304;
    return;
  }

  // Cache miss or stale copy: reply with the full file contents.
  request->reply = filter_url_file_get_contents (filename);
}
Code example #4
0
File: http.cpp Project: githubber/bibledit
/*
 The http headers from a browser could look as follows:
 
 GET /index/page HTTP/1.1
 Host: localhost:8080
 Connection: keep-alive
 User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.101 Safari/537.36
 Accept-Language: sn,en-US;q=0.8,en;q=0.6
 
 The function extracts the relevant information from the headers.
 
 It returns true if a header was (or could have been) parsed.
 */
bool http_parse_header (string header, Webserver_Request * request)
{
  // Clean the header line.
  header = filter_string_trim (header);
  
  // Deal with a header like this: GET /css/stylesheet.css?1.0.1 HTTP/1.1
  // Or like this: POST /session/login?request= HTTP/1.1
  bool get = false;
  if (header.substr (0, 3) == "GET") get = true;
  if (header.substr (0, 4) == "POST") {
    get = true;
    request->is_post = true;
  }
  if (get) {
    string query_data;
    vector <string> get = filter_string_explode (header, ' ');
    if (get.size () >= 2) {
      request->get = get [1];
      // The GET or POST value may be, for example: stylesheet.css?1.0.1.
      // Split it up into two parts: The part before the ?, and the part after the ?.
      istringstream issquery (request->get);
      int counter = 0;
      string s;
      while (getline (issquery, s, '?')) {
        if (counter == 0) request->get = s;
        if (counter == 1) query_data = s;
        counter++;
      }
    }
    // Read and parse the GET data.
    try {
      if (!query_data.empty ()) {
        ParseWebData::WebDataMap dataMap;
        ParseWebData::parse_get_data (query_data, dataMap);
        for (ParseWebData::WebDataMap::const_iterator iter = dataMap.begin(); iter != dataMap.end(); ++iter) {
          request->query [(*iter).first] = filter_url_urldecode ((*iter).second.value);
        }
      }
    } catch (...) {
    }
  }
  
  // Extract the User-Agent from a header like this:
  // User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.104 Safari/537.36
  if (header.substr (0, 10) == "User-Agent") {
    request->user_agent = header.substr (12);
  }
  
  // Extract the Accept-Language from a header like this:
  // Accept-Language: sn,en-US;q=0.8,en;q=0.6
  if (header.substr (0, 15) == "Accept-Language") {
    request->accept_language = header.substr (17);
  }
  
  // Extract the Host from a header like this:
  // Host: 192.168.1.139:8080
  if (header.substr (0, 4) == "Host") {
    request->host = header.substr (6);
    vector <string> bits = filter_string_explode (request->host, ':');
    if (!bits.empty ()) request->host = bits [0];
  }
  
  // Extract the Content-Type from a header like this:
  // Content-Type: application/x-www-form-urlencoded
  if (header.substr (0, 12) == "Content-Type") {
    request->content_type = header.substr (14);
  }

  // Extract the Content-Length from a header.
  if (header.substr (0, 14) == "Content-Length") {
    request->content_length = convert_to_int (header.substr (16));
  }
  
  // Extract the ETag from a header.
  if (header.substr (0, 13) == "If-None-Match") {
    request->if_none_match = header.substr (15);
  }
  
  // Something was or could have been parsed if the header contained something.
  return !header.empty ();
}