svn_error_t * svn_client_blame5(const char *target, const svn_opt_revision_t *peg_revision, const svn_opt_revision_t *start, const svn_opt_revision_t *end, const svn_diff_file_options_t *diff_options, svn_boolean_t ignore_mime_type, svn_boolean_t include_merged_revisions, svn_client_blame_receiver3_t receiver, void *receiver_baton, svn_client_ctx_t *ctx, apr_pool_t *pool) { struct file_rev_baton frb; svn_ra_session_t *ra_session; svn_revnum_t start_revnum, end_revnum; struct blame *walk, *walk_merged = NULL; apr_pool_t *iterpool; svn_stream_t *last_stream; svn_stream_t *stream; const char *target_abspath_or_url; if (start->kind == svn_opt_revision_unspecified || end->kind == svn_opt_revision_unspecified) return svn_error_create (SVN_ERR_CLIENT_BAD_REVISION, NULL, NULL); if (svn_path_is_url(target)) target_abspath_or_url = target; else SVN_ERR(svn_dirent_get_absolute(&target_abspath_or_url, target, pool)); /* Get an RA plugin for this filesystem object. */ SVN_ERR(svn_client__ra_session_from_path(&ra_session, &end_revnum, NULL, target, NULL, peg_revision, end, ctx, pool)); SVN_ERR(svn_client__get_revision_number(&start_revnum, NULL, ctx->wc_ctx, target_abspath_or_url, ra_session, start, pool)); if (end_revnum < start_revnum) return svn_error_create (SVN_ERR_CLIENT_BAD_REVISION, NULL, _("Start revision must precede end revision")); frb.start_rev = start_revnum; frb.end_rev = end_revnum; frb.target = target; frb.ctx = ctx; frb.diff_options = diff_options; frb.ignore_mime_type = ignore_mime_type; frb.include_merged_revisions = include_merged_revisions; frb.last_filename = NULL; frb.last_original_filename = NULL; frb.chain = apr_palloc(pool, sizeof(*frb.chain)); frb.chain->blame = NULL; frb.chain->avail = NULL; frb.chain->pool = pool; if (include_merged_revisions) { frb.merged_chain = apr_palloc(pool, sizeof(*frb.merged_chain)); frb.merged_chain->blame = NULL; frb.merged_chain->avail = NULL; frb.merged_chain->pool = pool; } SVN_ERR(svn_ra_get_repos_root2(ra_session, &frb.repos_root_url, pool)); frb.mainpool = pool; /* The callback will flip the following two pools, because it needs information from the previous call. Obviously, it can't rely on the lifetime of the pool provided by get_file_revs. */ frb.lastpool = svn_pool_create(pool); frb.currpool = svn_pool_create(pool); if (include_merged_revisions) { frb.filepool = svn_pool_create(pool); frb.prevfilepool = svn_pool_create(pool); } /* Collect all blame information. We need to ensure that we get one revision before the start_rev, if available so that we can know what was actually changed in the start revision. */ SVN_ERR(svn_ra_get_file_revs2(ra_session, "", start_revnum - (start_revnum > 0 ? 
1 : 0), end_revnum, include_merged_revisions, file_rev_handler, &frb, pool)); if (end->kind == svn_opt_revision_working) { /* If the local file is modified, we have to call the handler on the working copy file with keywords unexpanded */ svn_wc_status3_t *status; SVN_ERR(svn_wc_status3(&status, ctx->wc_ctx, target_abspath_or_url, pool, pool)); if (status->text_status != svn_wc_status_normal) { apr_hash_t *props; svn_stream_t *wcfile; svn_string_t *keywords; svn_stream_t *tempfile; const char *temppath; apr_hash_t *kw = NULL; SVN_ERR(svn_wc_prop_list2(&props, ctx->wc_ctx, target_abspath_or_url, pool, pool)); SVN_ERR(svn_stream_open_readonly(&wcfile, target, pool, pool)); keywords = apr_hash_get(props, SVN_PROP_KEYWORDS, APR_HASH_KEY_STRING); if (keywords) SVN_ERR(svn_subst_build_keywords2(&kw, keywords->data, NULL, NULL, 0, NULL, pool)); wcfile = svn_subst_stream_translated(wcfile, "\n", TRUE, kw, FALSE, pool); SVN_ERR(svn_stream_open_unique(&tempfile, &temppath, NULL, svn_io_file_del_on_pool_cleanup, pool, pool)); SVN_ERR(svn_stream_copy3(wcfile, tempfile, ctx->cancel_func, ctx->cancel_baton, pool)); SVN_ERR(add_file_blame(frb.last_filename, temppath, frb.chain, NULL, frb.diff_options, pool)); frb.last_filename = temppath; } } /* Report the blame to the caller. */ /* The callback has to have been called at least once. */ SVN_ERR_ASSERT(frb.last_filename != NULL); /* Create a pool for the iteration below. */ iterpool = svn_pool_create(pool); /* Open the last file and get a stream. */ SVN_ERR(svn_stream_open_readonly(&last_stream, frb.last_filename, pool, pool)); stream = svn_subst_stream_translated(last_stream, "\n", TRUE, NULL, FALSE, pool); /* Perform optional merged chain normalization. */ if (include_merged_revisions) { /* If we never created any blame for the original chain, create it now, with the most recent changed revision. This could occur if a file was created on a branch and then merged to another branch. This is semantically a copy, and we want to use the revision on the branch as the most recently changed revision. ### Is this really what we want to do here? Do the semantics of copy change? */ if (!frb.chain->blame) frb.chain->blame = blame_create(frb.chain, frb.rev, 0); normalize_blames(frb.chain, frb.merged_chain, pool); walk_merged = frb.merged_chain->blame; } /* Process each blame item.
*/ for (walk = frb.chain->blame; walk; walk = walk->next) { apr_off_t line_no; svn_revnum_t merged_rev; const char *merged_path; apr_hash_t *merged_rev_props; if (walk_merged) { merged_rev = walk_merged->rev->revision; merged_rev_props = walk_merged->rev->rev_props; merged_path = walk_merged->rev->path; } else { merged_rev = SVN_INVALID_REVNUM; merged_rev_props = NULL; merged_path = NULL; } for (line_no = walk->start; !walk->next || line_no < walk->next->start; ++line_no) { svn_boolean_t eof; svn_stringbuf_t *sb; svn_pool_clear(iterpool); SVN_ERR(svn_stream_readline(stream, &sb, "\n", &eof, iterpool)); if (ctx->cancel_func) SVN_ERR(ctx->cancel_func(ctx->cancel_baton)); if (!eof || sb->len) { if (walk->rev) SVN_ERR(receiver(receiver_baton, start_revnum, end_revnum, line_no, walk->rev->revision, walk->rev->rev_props, merged_rev, merged_rev_props, merged_path, sb->data, FALSE, iterpool)); else SVN_ERR(receiver(receiver_baton, start_revnum, end_revnum, line_no, SVN_INVALID_REVNUM, NULL, SVN_INVALID_REVNUM, NULL, NULL, sb->data, TRUE, iterpool)); } if (eof) break; } if (walk_merged) walk_merged = walk_merged->next; } SVN_ERR(svn_stream_close(stream)); svn_pool_destroy(frb.lastpool); svn_pool_destroy(frb.currpool); if (include_merged_revisions) { svn_pool_destroy(frb.filepool); svn_pool_destroy(frb.prevfilepool); } svn_pool_destroy(iterpool); return SVN_NO_ERROR; }
/** * Perform controlled allocation on behalf of the library */ static void* mod_sslhaf_alloc(sslhaf_cfg_t *cfg, size_t size) { conn_rec *c = cfg->user_data; return apr_palloc(c->pool, size); }
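/* ------------------------------------------------------------------------
 * A minimal standalone sketch of the allocation pattern above: a library's
 * allocation callback is routed onto an APR pool via apr_palloc, so the
 * library never frees individually and everything is reclaimed when the
 * pool is destroyed. Illustrative only (a plain program, not the sslhaf
 * module); the callback shape and names here are assumptions.
 * ------------------------------------------------------------------------ */
#include <stdio.h>
#include <string.h>
#include <apr_general.h>
#include <apr_pools.h>

static void *pool_alloc_cb(void *user_data, size_t size)
{
    apr_pool_t *pool = user_data;      /* pool smuggled in through user_data */
    return apr_palloc(pool, size);     /* no free(); pool cleanup reclaims it */
}

int main(void)
{
    apr_pool_t *pool;
    char *buf;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    buf = pool_alloc_cb(pool, 64);
    strcpy(buf, "allocated from the pool");
    printf("%s\n", buf);

    apr_pool_destroy(pool);            /* releases every allocation at once */
    apr_terminate();
    return 0;
}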
static svn_error_t * end_element(svn_ra_serf__xml_parser_t *parser, svn_ra_serf__dav_props_t name, apr_pool_t *scratch_pool) { iprops_context_t *iprops_ctx = parser->user_data; iprops_state_e state; state = parser->state->current_state; if (state == IPROPS_REPORT && strcmp(name.name, SVN_DAV__INHERITED_PROPS_REPORT) == 0) { svn_ra_serf__xml_pop_state(parser); } else if (state == IPROPS_PATH && strcmp(name.name, SVN_DAV__IPROP_PATH) == 0) { iprops_ctx->curr_iprop = apr_palloc( iprops_ctx->pool, sizeof(svn_prop_inherited_item_t)); iprops_ctx->curr_iprop->path_or_url = svn_path_url_add_component2(iprops_ctx->repos_root_url, iprops_ctx->curr_path->data, iprops_ctx->pool); iprops_ctx->curr_iprop->prop_hash = apr_hash_make(iprops_ctx->pool); svn_ra_serf__xml_pop_state(parser); } else if (state == IPROPS_PROPVAL && strcmp(name.name, SVN_DAV__IPROP_PROPVAL) == 0) { const svn_string_t *prop_val; if (iprops_ctx->curr_prop_val_encoding) { svn_string_t encoded_prop_val; if (strcmp(iprops_ctx->curr_prop_val_encoding, "base64") != 0) return svn_error_create(SVN_ERR_XML_MALFORMED, NULL, NULL); encoded_prop_val.data = iprops_ctx->curr_propval->data; encoded_prop_val.len = iprops_ctx->curr_propval->len; prop_val = svn_base64_decode_string(&encoded_prop_val, iprops_ctx->pool); } else { prop_val = svn_string_create_from_buf(iprops_ctx->curr_propval, iprops_ctx->pool); } svn_hash_sets(iprops_ctx->curr_iprop->prop_hash, apr_pstrdup(iprops_ctx->pool, iprops_ctx->curr_propname->data), prop_val); /* Clear current propname and propval in the event there are multiple properties on the current path. */ svn_stringbuf_setempty(iprops_ctx->curr_propname); svn_stringbuf_setempty(iprops_ctx->curr_propval); svn_ra_serf__xml_pop_state(parser); } else if (state == IPROPS_PROPNAME && strcmp(name.name, SVN_DAV__IPROP_PROPNAME) == 0) { svn_ra_serf__xml_pop_state(parser); } else if (state == IPROPS_ITEM && strcmp(name.name, SVN_DAV__IPROP_ITEM) == 0) { APR_ARRAY_PUSH(iprops_ctx->iprops, svn_prop_inherited_item_t *) = iprops_ctx->curr_iprop; svn_ra_serf__xml_pop_state(parser); }
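/* ------------------------------------------------------------------------
 * Hedged sketch of the base64 branch in the PROPVAL handler above: when the
 * encoding attribute is "base64", the accumulated value is decoded with
 * svn_base64_decode_string before being stored in the property hash.
 * Standalone program against libsvn_subr; the input string is illustrative.
 * ------------------------------------------------------------------------ */
#include <stdio.h>
#include <apr_general.h>
#include "svn_pools.h"
#include "svn_string.h"
#include "svn_base64.h"

int main(void)
{
    apr_pool_t *pool;
    svn_string_t *encoded;
    const svn_string_t *decoded;

    apr_initialize();
    pool = svn_pool_create(NULL);

    encoded = svn_string_create("aGVsbG8=", pool);      /* base64 for "hello" */
    decoded = svn_base64_decode_string(encoded, pool);
    printf("decoded: %.*s\n", (int)decoded->len, decoded->data);

    svn_pool_destroy(pool);
    apr_terminate();
    return 0;
}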
/* Send the authentication to the log table */ int pg_log_auth_user(request_rec * r, pg_auth_config_rec * sec, char *user, char *sent_pw) { char sql[MAX_STRING_LEN]; char *s; int n; char fields[MAX_STRING_LEN]; char values[MAX_STRING_LEN]; char *safe_user; char *safe_pw; char *safe_req; char ts[MAX_STRING_LEN]; /* time in string format */ apr_time_exp_t t; /* time of request start */ apr_size_t retsize; safe_user = apr_palloc(r->pool, 1 + 2 * strlen(user)); safe_pw = apr_palloc(r->pool, 1 + 2 * strlen(sent_pw)); safe_req = apr_palloc(r->pool, 1 + 2 * strlen(r->the_request)); /* we do not want to process internal redirects */ if (!ap_is_initial_req(r)) return DECLINED; if ((!sec->auth_pg_log_table) || (!sec->auth_pg_log_uname_field) || (!sec->auth_pg_log_date_field)) { // the table name, username field and date field must all be configured; // without them we cannot log this request, so decline return DECLINED; } /* AUD: MAX_STRING_LEN probably isn't always correct */ pg_check_string(safe_user, user, strlen(user)); pg_check_string(safe_pw, sent_pw, strlen(sent_pw)); pg_check_string(safe_req, r->the_request, strlen(r->the_request)); if (sec->auth_pg_lowercaseuid) { /* and force it to lowercase */ n = 0; while (safe_user[n] && n < (MAX_STRING_LEN - 1)) { if (isupper(safe_user[n])) { safe_user[n] = tolower(safe_user[n]); } n++; } } if (sec->auth_pg_uppercaseuid) { /* and force it to uppercase */ n = 0; while (safe_user[n] && n < (MAX_STRING_LEN - 1)) { if (islower(safe_user[n])) { safe_user[n] = toupper(safe_user[n]); } n++; } } /* time field format */ apr_time_exp_lt(&t, r->request_time); apr_strftime(ts, &retsize, 100, "%Y-%m-%d %H:%M:%S", &t); /* SQL Statement, required fields: Username, Date */ apr_snprintf(fields, MAX_STRING_LEN, "%s,%s", sec->auth_pg_log_uname_field, sec->auth_pg_log_date_field); apr_snprintf(values, MAX_STRING_LEN, "'%s','%s'", safe_user, ts); /* Optional parameters */ if (sec->auth_pg_log_addrs_field) { /* IP Address field */ apr_snprintf(sql, MAX_STRING_LEN, ", %s", sec->auth_pg_log_addrs_field); strncat(fields, sql, MAX_STRING_LEN - strlen(fields) - 1); apr_snprintf(sql, MAX_STRING_LEN, ", '%s'", r->connection->remote_ip); strncat(values, sql, MAX_STRING_LEN - strlen(values) - 1); } if (sec->auth_pg_log_pwd_field) { /* Password field, logged in clear text - WARNING */ apr_snprintf(sql, MAX_STRING_LEN, ", %s", sec->auth_pg_log_pwd_field); strncat(fields, sql, MAX_STRING_LEN - strlen(fields) - 1); apr_snprintf(sql, MAX_STRING_LEN, ", '%s'", safe_pw); strncat(values, sql, MAX_STRING_LEN - strlen(values) - 1); } if (sec->auth_pg_log_uri_field) { /* request string */ apr_snprintf(sql, MAX_STRING_LEN, ", %s", sec->auth_pg_log_uri_field); strncat(fields, sql, MAX_STRING_LEN - strlen(fields) - 1); apr_snprintf(sql, MAX_STRING_LEN, ", '%s'", safe_req); strncat(values, sql, MAX_STRING_LEN - strlen(values) - 1); } apr_snprintf(sql, MAX_STRING_LEN, "insert into %s (%s) values(%s) ; ", sec->auth_pg_log_table, fields, values); s = do_pg_query(r, sql, sec); return (0); }
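/* ------------------------------------------------------------------------
 * Standalone sketch of how the fields/values buffers above are assembled
 * into the final INSERT statement: required columns first, then optional
 * columns appended pairwise with strncat. Table and column names here are
 * illustrative, not the module's defaults.
 * ------------------------------------------------------------------------ */
#include <stdio.h>
#include <string.h>
#include <apr_general.h>
#include <apr_strings.h>

int main(void)
{
    char fields[256], values[256], sql[512];

    apr_initialize();

    /* required columns: username + timestamp */
    apr_snprintf(fields, sizeof(fields), "%s,%s", "uname", "logdate");
    apr_snprintf(values, sizeof(values), "'%s','%s'", "alice", "2024-01-01 12:00:00");

    /* one optional column, appended the same way the module does */
    strncat(fields, ", remote_ip", sizeof(fields) - strlen(fields) - 1);
    strncat(values, ", '192.0.2.1'", sizeof(values) - strlen(values) - 1);

    apr_snprintf(sql, sizeof(sql), "insert into %s (%s) values(%s) ; ",
                 "auth_log", fields, values);
    printf("%s\n", sql);   /* insert into auth_log (uname,logdate, remote_ip) values(...) */

    apr_terminate();
    return 0;
}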
tsvn_svn_diff_t_extension * CDiffData::MovedBlocksDetect(svn_diff_t * diffYourBase, DWORD dwIgnoreWS, apr_pool_t * pool) { LineToGroupMap map; tsvn_svn_diff_t_extension* head = nullptr; tsvn_svn_diff_t_extension* tail = nullptr; svn_diff_t * tempdiff = diffYourBase; LONG baseLine = 0; LONG yourLine = 0; for(;tempdiff; tempdiff = tempdiff->next) // fill map { if(tempdiff->type != svn_diff__type_diff_modified) continue; baseLine = static_cast<LONG>(tempdiff->original_start); if (m_arBaseFile.GetCount() <= (baseLine+tempdiff->original_length)) return nullptr; for(int i = 0; i < tempdiff->original_length; ++i, ++baseLine) { const CString &sCurrentBaseLine = m_arBaseFile.GetAt(baseLine); if (dwIgnoreWS) map.Add(baseLine, GetTrimmedString(sCurrentBaseLine, dwIgnoreWS), 0); else map.Add(baseLine, sCurrentBaseLine, 0); } yourLine = static_cast<LONG>(tempdiff->modified_start); if (m_arYourFile.GetCount() <= (yourLine+tempdiff->modified_length)) return nullptr; for(int i = 0; i < tempdiff->modified_length; ++i, ++yourLine) { const CString &sCurrentYourLine = m_arYourFile.GetAt(yourLine); if(dwIgnoreWS) map.Add(yourLine, GetTrimmedString(sCurrentYourLine, dwIgnoreWS), 1); else map.Add(yourLine, sCurrentYourLine, 1); } } for(tempdiff = diffYourBase; tempdiff; tempdiff = tempdiff->next) { // Scan through diff blocks, finding moved sections from left side // and splitting them out // That is, we actually fragment diff blocks as we find moved sections if(tempdiff->type != svn_diff__type_diff_modified) continue; EquivalencyGroup* pGroup = nullptr; int i; for(i = static_cast<int>(tempdiff->original_start); (i - tempdiff->original_start)< tempdiff->original_length; ++i) { EquivalencyGroup * group = ExtractGroup(map, m_arBaseFile.GetAt(i), dwIgnoreWS); if(group->IsPerfectMatch()) { pGroup = group; break; } } if(!pGroup) // if no match continue; // found a match int j = pGroup->m_LinesRight.GetSingle(); // Ok, now our moved block is the single line (i, j) // extend moved block upward as far as possible int i1 = i - 1; int j1 = j - 1; for(; (i1 >= tempdiff->original_start) && (j1>=0) && (i1>=0); --i1, --j1) { EquivalencyGroup * pGroup0 = ExtractGroup(map, m_arBaseFile.GetAt(i1), dwIgnoreWS); EquivalencyGroup * pGroup1 = ExtractGroup(map, m_arYourFile.GetAt(j1), dwIgnoreWS); if(pGroup1 != pGroup0) break; pGroup0->m_LinesLeft.Remove(i1); pGroup1->m_LinesRight.Remove(j1); } ++i1; ++j1; // Ok, now our moved block is (i1..i, j1..j) // extend moved block downward as far as possible int i2 = i + 1; int j2 = j + 1; for(; ((i2-tempdiff->original_start) < tempdiff->original_length)&&(j2>=0); ++i2, ++j2) { if(i2 >= m_arBaseFile.GetCount() || j2 >= m_arYourFile.GetCount()) break; EquivalencyGroup * pGroup0 = ExtractGroup(map, m_arBaseFile.GetAt(i2), dwIgnoreWS); EquivalencyGroup * pGroup1 = ExtractGroup(map, m_arYourFile.GetAt(j2), dwIgnoreWS); if(pGroup1 != pGroup0) break; pGroup0->m_LinesLeft.Remove(i2); pGroup1->m_LinesRight.Remove(j2); } --i2; --j2; // Ok, now our moved block is (i1..i2,j1..j2) tsvn_svn_diff_t_extension * newTail = CreateDiffExtension(tempdiff, pool); if (!head) { head = newTail; tail = head; } else { tail->next = newTail; tail = newTail; } int prefix = i1 - static_cast<int>(tempdiff->original_start); if(prefix) { // break tempdiff (current change) into two pieces // first part is the prefix, before the moved part // that stays in tempdiff // second part is the moved part & anything after it // that goes in newob // leave the left side (tempdiff->original_length) on tempdiff // so no right side on 
newob // newob will be the moved part only, later after we split off any suffix from it svn_diff_t* newob = static_cast<svn_diff_t*>(apr_palloc(pool, sizeof(svn_diff_t))); memset(newob, 0, sizeof(*newob)); tail->base = newob; newob->type = svn_diff__type_diff_modified; newob->original_start = i1; newob->modified_start = tempdiff->modified_start + tempdiff->modified_length; newob->modified_length = 0; newob->original_length = tempdiff->original_length - prefix; newob->next = tempdiff->next; tempdiff->original_length = prefix; tempdiff->next = newob; // now make tempdiff point to the moved part (& any suffix) tempdiff = newob; } tail->moved_to = j1; apr_off_t suffix = (tempdiff->original_length) - (i2- (tempdiff->original_start)) - 1; if (suffix) { // break off any suffix from tempdiff // newob will be the suffix, and will get all the right side svn_diff_t* newob = static_cast<svn_diff_t*>(apr_palloc(pool, sizeof(*newob))); memset(newob, 0, sizeof(*newob)); newob->type = svn_diff__type_diff_modified; newob->original_start = i2 + 1; newob->modified_start = tempdiff->modified_start; newob->modified_length = tempdiff->modified_length; newob->original_length = suffix; newob->next = tempdiff->next; tempdiff->modified_length = 0; tempdiff->original_length -= suffix; tempdiff->next = newob; } } // Scan through diff blocks, finding moved sections from right side // and splitting them out // That is, we actually fragment diff blocks as we find moved sections tsvn_svn_diff_t_extension * existing = head; tail = nullptr; for(tempdiff = diffYourBase; tempdiff; tempdiff = tempdiff->next) { // scan down block for a match if(tempdiff->type != svn_diff__type_diff_modified) continue; EquivalencyGroup* pGroup = nullptr; int j = 0; for(j = static_cast<int>(tempdiff->modified_start); (j - tempdiff->modified_start) < tempdiff->modified_length; ++j) { EquivalencyGroup * group = ExtractGroup(map, m_arYourFile.GetAt(j), dwIgnoreWS); if(group->IsPerfectMatch()) { pGroup = group; break; } } // if no match, go to next diff block if (!pGroup) { AdjustExistingAndTail(tempdiff, existing, tail); continue; } // found a match int i = pGroup->m_LinesLeft.GetSingle(); if (i == 0) continue; // Ok, now our moved block is the single line (i,j) // extend moved block upward as far as possible int i1 = i-1; int j1 = j-1; for ( ; (j1>=tempdiff->modified_start) && (j1>=0) && (i1>=0); --i1, --j1) { EquivalencyGroup * pGroup0 = ExtractGroup(map, m_arBaseFile.GetAt(i1), dwIgnoreWS); EquivalencyGroup * pGroup1 = ExtractGroup(map, m_arYourFile.GetAt(j1), dwIgnoreWS); if (pGroup0 != pGroup1) break; pGroup0->m_LinesLeft.Remove(i1); pGroup1->m_LinesRight.Remove(j1); } ++i1; ++j1; // Ok, now our moved block is (i1..i,j1..j) // extend moved block downward as far as possible int i2 = i+1; int j2 = j+1; for ( ; (j2-(tempdiff->modified_start) < tempdiff->modified_length) && (i2>=0); ++i2,++j2) { if(i2 >= m_arBaseFile.GetCount() || j2 >= m_arYourFile.GetCount()) break; EquivalencyGroup * pGroup0 = ExtractGroup(map, m_arBaseFile.GetAt(i2), dwIgnoreWS); EquivalencyGroup * pGroup1 = ExtractGroup(map, m_arYourFile.GetAt(j2), dwIgnoreWS); if (pGroup0 != pGroup1) break; pGroup0->m_LinesLeft.Remove(i2); pGroup1->m_LinesRight.Remove(j2); } --i2; --j2; // Ok, now our moved block is (i1..i2,j1..j2) tsvn_svn_diff_t_extension* newTail = nullptr; if(existing && existing->base == tempdiff) { newTail = existing; } else { newTail = CreateDiffExtension(tempdiff, pool); if (!head) head = newTail; else if(tail) { newTail->next = tail->next; tail->next = newTail; } } 
tail = newTail; apr_off_t prefix = j1 - (tempdiff->modified_start); if (prefix) { // break tempdiff (current change) into two pieces // first part is the prefix, before the moved part // that stays in tempdiff // second part is the moved part & anything after it // that goes in newob // leave the left side (tempdiff->original_length) on tempdiff // so no right side on newob // newob will be the moved part only, later after we split off any suffix from it svn_diff_t* newob = static_cast<svn_diff_t*>(apr_palloc(pool, sizeof(*newob))); memset(newob, 0, sizeof(*newob)); newob->type = svn_diff__type_diff_modified; if(existing == newTail) { newTail = CreateDiffExtension(newob, pool); newTail->next = tail->next; tail->next = newTail; tail = newTail; } tail->base = newob; newob->original_start = tempdiff->original_start + tempdiff->original_length; newob->modified_start = j1; newob->modified_length = tempdiff->modified_length - prefix; newob->original_length = 0; newob->next = tempdiff->next; tempdiff->modified_length = prefix; tempdiff->next = newob; // now make tempdiff point to the moved part (& any suffix) tempdiff = newob; } // now tempdiff points to a moved diff chunk with no prefix, but maybe a suffix tail->moved_from = i1; apr_off_t suffix = (tempdiff->modified_length) - (j2-(tempdiff->modified_start)) - 1; if (suffix) { // break off any suffix from tempdiff // newob will be the suffix, and will get all the left side svn_diff_t* newob = static_cast<svn_diff_t*>(apr_palloc(pool, sizeof(*newob))); memset(newob, 0, sizeof(*newob)); tsvn_svn_diff_t_extension * eNewOb = CreateDiffExtension(newob, pool); newob->type = svn_diff__type_diff_modified; newob->original_start = tempdiff->original_start; newob->modified_start = j2+1; newob->modified_length = suffix; newob->original_length = tempdiff->original_length; newob->next = tempdiff->next; eNewOb->moved_from = -1; eNewOb->moved_to = tail->moved_to; tempdiff->modified_length -= suffix; tempdiff->original_length = 0; tail->moved_to = -1; tempdiff->next = newob; eNewOb->next = tail->next; tail->next = eNewOb; existing = tail = eNewOb; } AdjustExistingAndTail(tempdiff, existing, tail); } return head; }
static apr_status_t impl_pollset_create(apr_pollset_t *pollset, apr_uint32_t size, apr_pool_t *p, apr_uint32_t flags) { apr_status_t rv; pollset->p = apr_palloc(p, sizeof(apr_pollset_private_t)); #if APR_HAS_THREADS if (flags & APR_POLLSET_THREADSAFE && ((rv = apr_thread_mutex_create(&pollset->p->ring_lock, APR_THREAD_MUTEX_DEFAULT, p)) != APR_SUCCESS)) { pollset->p = NULL; return rv; } #else if (flags & APR_POLLSET_THREADSAFE) { pollset->p = NULL; return APR_ENOTIMPL; } #endif /* POLLIN and POLLOUT are represented in different returned * events, so we need 2 entries per descriptor in the result set, * both for what is returned by kevent() and what is returned to * the caller of apr_pollset_poll() (since it doesn't spend the * CPU to coalesce separate APR_POLLIN and APR_POLLOUT events * for the same descriptor) */ pollset->p->setsize = 2 * size; pollset->p->ke_set = (struct kevent *) apr_palloc(p, pollset->p->setsize * sizeof(struct kevent)); memset(pollset->p->ke_set, 0, pollset->p->setsize * sizeof(struct kevent)); pollset->p->kqueue_fd = kqueue(); if (pollset->p->kqueue_fd == -1) { pollset->p = NULL; return apr_get_netos_error(); } { int flags; if ((flags = fcntl(pollset->p->kqueue_fd, F_GETFD)) == -1) return errno; flags |= FD_CLOEXEC; if (fcntl(pollset->p->kqueue_fd, F_SETFD, flags) == -1) return errno; } pollset->p->result_set = apr_palloc(p, pollset->p->setsize * sizeof(apr_pollfd_t)); APR_RING_INIT(&pollset->p->query_ring, pfd_elem_t, link); APR_RING_INIT(&pollset->p->free_ring, pfd_elem_t, link); APR_RING_INIT(&pollset->p->dead_ring, pfd_elem_t, link); return APR_SUCCESS; }
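/* ------------------------------------------------------------------------
 * The function above is the kqueue backend behind APR's portable pollset
 * API; from the caller's side only the public calls below are visible.
 * Hedged sketch (a listening TCP socket on port 8080 is just for
 * illustration; error handling omitted for brevity).
 * ------------------------------------------------------------------------ */
#include <apr_general.h>
#include <apr_network_io.h>
#include <apr_poll.h>
#include <apr_time.h>

int main(void)
{
    apr_pool_t *pool;
    apr_pollset_t *pollset;
    apr_socket_t *sock;
    apr_sockaddr_t *sa;
    apr_pollfd_t pfd;
    const apr_pollfd_t *ready;
    apr_int32_t nready;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    apr_sockaddr_info_get(&sa, NULL, APR_INET, 8080, 0, pool);
    apr_socket_create(&sock, sa->family, SOCK_STREAM, APR_PROTO_TCP, pool);
    apr_socket_bind(sock, sa);
    apr_socket_listen(sock, 5);

    apr_pollset_create(&pollset, 16, pool, 0);

    pfd.p = pool;
    pfd.desc_type = APR_POLL_SOCKET;
    pfd.reqevents = APR_POLLIN;     /* the kqueue backend turns this into an EVFILT_READ kevent */
    pfd.desc.s = sock;
    pfd.client_data = NULL;
    apr_pollset_add(pollset, &pfd);

    /* wait up to 5 seconds; ready descriptors come back through `ready` */
    apr_pollset_poll(pollset, apr_time_from_sec(5), &nready, &ready);

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}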
int mod_but_analyze_response_headers(void *result, const char *key, const char *value) { /* This function is called for all HTTP RESPONSE HEADER HTTP/1.1 302 Found Date: Mon, 22 Aug 2005 21:10:45 GMT Set-Cookie: E2=jLllj33EsXhInvgW5KDkMtzB4YcqLy2Eawv1EAbY0K3NGUHczLF1oIrJ7bURyw1; domain=but.ch; path=/; Set-Cookie: TEST=ABC; Set-Cookie: FREECOOKIE=123; Location: /cgi/cgi-bin/printenv?__cookie_try=1 Content-Length: 281 Content-Type: text/html; charset=iso-8859-1 It checks the Set-Cookie headers. */ cookie_res * cr = (cookie_res *) result; request_rec *r = cr->r; apr_rmm_t *cs_rmm = find_cs_rmm(); apr_rmm_off_t *off = find_cs_rmm_off(); mod_but_server_t *config; mod_but_dir_t *dconfig = ap_get_module_config(r->per_dir_config, &but_module); pcre *re; // the regular expression const char *error; // error text for the failed regex compilation int error_offset; // offset of the regex compilation error, if any int rc = 0; // return code of pcre_exec int re_vector[3072]; apr_int64_t num_set_cookie; apr_int64_t auth_strength; char *qa = (char *)apr_pstrdup(r->pool, value); char *p, *last; ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: CALLING OUTPUT FILTER"); config = ap_get_module_config(r->server->module_config, &but_module); if (config == NULL) { ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: Illegal server record (output filter)"); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: END OF OUTPUT FILTER"); return DECLINED; } ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: Request URI [%s]", r->uri); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: Working with SHM offset [%s]", apr_table_get(r->notes, "SHMOFFSET")); re = pcre_compile("cOOkIe", PCRE_CASELESS, &error, &error_offset, NULL); if (re == NULL) { ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: return code of pcre_compile in Cookie Store is NULL"); return DECLINED; } if(key==NULL){ ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: key is NULL"); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: END OF OUTPUT FILTER"); return DECLINED; } rc = pcre_exec(re, NULL, key, strlen(key), 0, 0, re_vector, 3072); if (rc < 0) { ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: Set-Cookie was not in ARGS = %s", key); return DECLINED; } if (rc == 0) { ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: PCRE output vector too small (%d)", 3072/3-1); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: Problems with the following ARGS = %s", key); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: END OF OUTPUT FILTER"); return DECLINED; } if (rc > 0) { char* val1; char* substr; char* key1; mod_but_cookie_cookiestore *csp; apr_rmm_t *cs_rmm_cookiestore; apr_rmm_off_t *off_cookiestore; ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: ====================== FIND SET-COOKIE HEADER ====================="); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: Found Set-Cookie [%s]=[%s]", key,value); /* Store Set-Cookie attributes into mod_but_cookie_cookiestore struct */ substr = strchr(value, '=' ); key1 = (char*)apr_pstrndup(r->pool, value, (strlen(value)-strlen(substr)) ); substr++; // now substr points to the value if (strchr(substr,';')) { ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: OUTPUT_FILTER: COOKIE HAS \";\""); val1 = (char*)apr_pstrndup( r->pool, substr, (strlen(substr)-strlen(strchr(substr,';'))) ); } else { ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: OUTPUT_FILTER: COOKIE HAS NO \";\""); val1 = (char*)apr_pstrndup( r->pool, substr, 
(strlen(substr))); } if (!apr_strnatcmp(key1, "") && !apr_strnatcmp(val1, "")){ ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: Unparsed %s - %s", key1, val1); return OK; } csp = apr_palloc(r->pool, sizeof(mod_but_cookie_cookiestore)); apr_cpystrn(csp->cookie_name, key1, sizeof(csp->cookie_name)); apr_cpystrn(csp->cookie_value, val1, sizeof(csp->cookie_value)); if (dconfig == NULL) { ap_log_rerror(PC_LOG_INFO, r, "mod_but_authorization.c: Illegal Directory Config (location_id)"); } csp->location_id = dconfig->mod_but_location_id; // remember the location, for which a cookie was set. ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: COOKIE LOCATION ID [%d]", csp->location_id); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: PARSED COOKIENAME AND VALUE [%s]-[%s]", csp->cookie_name, csp->cookie_value); cs_rmm_cookiestore = find_cs_rmm_cookiestore(); off_cookiestore = find_cs_rmm_off_cookiestore(); if(apr_table_get(r->notes, "SHMOFFSET")){ apr_int64_t i = apr_atoi64(apr_table_get(r->notes, "SHMOFFSET")); mod_but_cookie *c = apr_rmm_addr_get(cs_rmm, off[i]); /* 1) LOGON cookie? 2) SERVICE_LIST cookie? 3) FREE COOKIE? 4) MOD_BUT_SESSION? 5) Others */ /* 1) Lets see, if the cookie is a LOGON cookie */ if (!apr_strnatcmp(csp->cookie_name, config->global_logon_auth_cookie_name)){ /* First, we set the logon flag to true */ ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: FOUND LOGON Header"); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: Requesting r->uri is: %s", r->uri); re = pcre_compile(config->authorized_logon_url, PCRE_CASELESS, &error, &error_offset, NULL); rc = pcre_exec(re, NULL, r->uri, strlen(r->uri), 0, 0, re_vector, 3072); if (rc < 0) { ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: LOGON=ok from unauthorized source - we denied it"); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: Unsetting LOGON=ok from response header"); return DECLINED; } if (rc == 0) { ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: PCRE output vector too small (%d)", 3072/3-1); return DECLINED; } if (rc > 0) { ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: LOGON comes form a trusted/authorized source"); if (!apr_strnatcmp(csp->cookie_value, config->global_logon_auth_cookie_value)){ ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: LOGON=ok comes form a trusted/authorized source"); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: LOGON=ok (set c->logon_state=1)"); c->logon_state=1; apr_table_set(r->notes, "LOGON_STATUS", "OK"); } // unset LOGON cookie from the response header ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: Unsetting LOGON=ok from response header"); return DECLINED; } } /* 3) Check if we have a FREE Cookie (configured in httpd.conf) We do not store FREE Cookies into the cookie store */ if(config->session_store_free_cookies){ char *temp; ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: MOD_BUT_SESSION_STORE_FREE_COOKIES is configured"); re = pcre_compile(config->session_store_free_cookies, 0, &error, &error_offset, NULL); if (re == NULL) { // ap_log_rerror(PC_LOG_INFO, } temp = apr_pstrcat(r->pool, key1, "=", value, NULL); rc = pcre_exec(re, NULL, temp, strlen(temp), 0, 0, re_vector, 3072); if (rc < 0) { ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: Set-Cookie is not a FREE COOKIE key = %s | value = %s", key1, value); } if (rc == 0) { ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: PCRE output vector too small (%d)", 3072/3-1); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: 
Problems with the following ARGS = %s", key1); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: END OF OUTPUT FILTER"); return DECLINED; } if (rc > 0) { ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: FOUND FREE COOKIE [%s] [%s]", key1, value); num_set_cookie = apr_atoi64(apr_table_get(r->notes, "NUM_SET_COOKIE")); num_set_cookie += 1; apr_table_set(r->notes, "NUM_SET_COOKIE", apr_itoa(r->pool, num_set_cookie)); apr_table_set(r->notes, apr_itoa(r->pool, num_set_cookie), value); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter: VALUE IS [%s]", apr_table_get(r->notes, apr_itoa(r->pool, num_set_cookie))); return DECLINED; } } /* 4) If the Cookie is the MOD_BUT_SESSION, we don't want to have that cookie stored in the cookie store. This means that NO backend application is allowed to have the same cookie name as the MOD_BUT_SESSION */ if (!apr_strnatcmp(key1, config->cookie_name)){ ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: Set-Cookie is MOD_BUT_SESSION"); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: END OF OUTPUT FILTER"); return DECLINED; } /* 5) If LOGON=ok, we will store the special meaning cookies in a special way here. */ if (apr_table_get(r->notes, "LOGON_STATUS") != NULL){ if (!apr_strnatcmp(key1, "MOD_BUT_AUTH_STRENGTH")){ auth_strength = apr_atoi64(val1); if ((auth_strength >= 0) && (auth_strength <= 2)) { c->auth_strength=auth_strength; } else { c->auth_strength= 0; // default value, if auth_strength is not parseable or greater than 2 } return DECLINED; } /* Let's see if the SERVICE_LIST cookie is set */ if (!apr_strnatcmp(csp->cookie_name, config->service_list_cookie_name)){ ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: FOUND SERVICE LIST Cookiename (Authorization Regex)"); apr_cpystrn(c->service_list, val1, sizeof(c->service_list)); return DECLINED; } if (!apr_strnatcmp(key1, "MOD_BUT_BACKEND_SESSION")){ ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: FOUND MOD_BUT_BACKEND_SESSION [%s]", value); char *p1 = NULL; char *p2 = NULL; char *p3 = NULL; char *p11 = NULL; char *p21 = NULL; char *p31 = NULL; for(p = (char *)apr_strtok(qa, "; ", &last); p != NULL; p = (char *)apr_strtok(NULL, "; ", &last)) { p1 = strstr(p, "bname"); if(p1){ ap_log_rerror(PC_LOG_INFO, r, "mod_but_request_filter.c: bname found [%s]", p1); p1 += strlen("bname"); if(*p1 == '=') { ap_log_rerror(PC_LOG_INFO, r, "mod_but_request_filter.c: bname [%s]", (char *)apr_pstrdup(r->pool, p1+1)); p11 = apr_pstrdup(r->pool, p1+1); } } p2 = strstr(p, "bvalue"); if(p2){ ap_log_rerror(PC_LOG_INFO, r, "mod_but_request_filter.c: bvalue [%s]", p2); p2 += strlen("bvalue"); if(*p2 == '=') { ap_log_rerror(PC_LOG_INFO, r, "mod_but_request_filter.c: bvalue [%s]", (char *)apr_pstrdup(r->pool, p2+1)); p21 = apr_pstrdup(r->pool, p2+1); } } p3 = strstr(p, "bclearance"); if(p3){ ap_log_rerror(PC_LOG_INFO, r, "mod_but_request_filter.c: bclearance [%s]", p3); p3 += strlen("bclearance"); if(*p3 == '=') { ap_log_rerror(PC_LOG_INFO, r, "mod_but_request_filter.c: bclearance [%s]", (char *)apr_pstrdup(r->pool, p3+1)); p31 = apr_pstrdup(r->pool, p3+1); } } } ap_log_rerror(PC_LOG_INFO, r, "mod_but_request_filter.c: bname found [%s]=[%s] CLEAR [%s]", p11,p21,p31); for(p31 = apr_strtok(p31, ",", &last); p31 != NULL; p31 = apr_strtok(NULL, ",", &last)) { ap_log_rerror(PC_LOG_INFO, r, "mod_but_request_filter.c: P31 = [%s]", p31); apr_cpystrn(csp->cookie_name, p11, sizeof(csp->cookie_name)); apr_cpystrn(csp->cookie_value, p21, sizeof(csp->cookie_value)); csp->location_id = apr_atoi64(p31); if
(c->link_to_cookiestore == -1){ /* Here we have to update the c->link_to_cookiestore */ int cookiestore_offset = find_empty_cookiestore_slot(r); if (cookiestore_offset >= 0){ mod_but_cookie_cookiestore *cs; /* If we are here, we found an empty cookiestore shm storage we can put our stuff into */ cs = apr_rmm_addr_get(cs_rmm_cookiestore, off_cookiestore[cookiestore_offset]); apr_cpystrn(cs->cookie_name, p11, sizeof(cs->cookie_name)); apr_cpystrn(cs->cookie_value, p21, sizeof(cs->cookie_value)); c->link_to_cookiestore = cookiestore_offset; cs->location_id = apr_atoi64(p31); }else{ /* If we are here, we did not have more cookiestore shm */ ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: Unable finding new cookiestore slot"); apr_table_set(r->notes, "CS_SHM" , "PROBLEM"); } } else { int status; // if we are here, we are not the first cookie to be saved. status = store_cookie_in_cookiestore(r, c->link_to_cookiestore, csp); if (status == 30){ ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: All Cookiestore SHM used [%d] - Status", status); apr_table_set(r->notes, "CS_SHM" , "PROBLEM"); } } } /* Loop around clearance and save the cookies into the correct location_id */ return DECLINED; } } /* 6) If the Cookie does not have a special meaning to us, let's store them in the session store (without DLS) */ // store all other cookies to the cookiestore if (c->link_to_cookiestore == -1){ /* Here we have to update the c->link_to_cookiestore */ int cookiestore_offset = find_empty_cookiestore_slot(r); if (cookiestore_offset >= 0){ mod_but_cookie_cookiestore *cs; /* If we are here, we found an empty cookiestore shm storage we can put our stuff into */ ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: OUTPUT FILTER: ANCHOR LINK TO COOKIE STORE [%d]", cookiestore_offset); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: Copy HEADER @ CS offset %d", cookiestore_offset); cs = apr_rmm_addr_get(cs_rmm_cookiestore, off_cookiestore[cookiestore_offset]); apr_cpystrn(cs->cookie_name, key1, sizeof(cs->cookie_name)); apr_cpystrn(cs->cookie_value, val1, sizeof(cs->cookie_value)); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: STOREING NEW cookie_name [%s]=[%s] in CookieStore", cs->cookie_name, cs->cookie_value); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: STOREING NEW cookie_name [%s] and cookie_value [%s] @ CS offset [%d] and cookie_next is [%d]", cs->cookie_name, cs->cookie_value, cookiestore_offset, cs->cookie_next); c->link_to_cookiestore = cookiestore_offset; cs->location_id = dconfig->mod_but_location_id; ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: STOREING NEW cookie_name [%s] = [%s] ", cs->cookie_name, cs->cookie_value); ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: STOREING NEW cookie_name [%s] and cookie_value [%s] @ CS offset [%d] and cookie_next is [%d] and cookie_before is [%d]", cs->cookie_name, cs->cookie_value, cookiestore_offset, cs->cookie_next, cs->cookie_before); }else{ /* If we are here, we did not have more cookiestore shm */ ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: Unable finding new cookiestore slot"); apr_table_set(r->notes, "CS_SHM" , "PROBLEM"); } } else { int status; // if we are here, we are not the first cookie to be saved. 
ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: STORE [%s]=[%s]", csp->cookie_name,csp->cookie_value); status = store_cookie_in_cookiestore(r, c->link_to_cookiestore, csp); if (status == 30){ ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: All Cookiestore SHM used [%d] - Status", status); apr_table_set(r->notes, "CS_SHM" , "PROBLEM"); } } } } ap_log_rerror(PC_LOG_INFO, r, "mod_but_output_filter.c: END OF OUTPUT FILTER"); return DECLINED; }
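/* ------------------------------------------------------------------------
 * The output filter above shuttles state between hooks through r->notes,
 * which is an apr_table_t keyed and valued by strings (SHMOFFSET,
 * NUM_SET_COOKIE, LOGON_STATUS, ...). Minimal standalone sketch of that
 * table API, outside any request context.
 * ------------------------------------------------------------------------ */
#include <stdio.h>
#include <apr_general.h>
#include <apr_tables.h>

int main(void)
{
    apr_pool_t *pool;
    apr_table_t *notes;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    notes = apr_table_make(pool, 4);              /* like r->notes */
    apr_table_set(notes, "NUM_SET_COOKIE", "0");  /* apr_table_set copies key and value */
    apr_table_set(notes, "LOGON_STATUS", "OK");

    printf("LOGON_STATUS=%s\n", apr_table_get(notes, "LOGON_STATUS"));

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}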
static int validate_server_certificate(int cert_valid, X509_STORE_CTX *store_ctx) { SSL *ssl; serf_ssl_context_t *ctx; X509 *server_cert; int err, depth; int failures = 0; ssl = X509_STORE_CTX_get_ex_data(store_ctx, SSL_get_ex_data_X509_STORE_CTX_idx()); ctx = SSL_get_app_data(ssl); server_cert = X509_STORE_CTX_get_current_cert(store_ctx); depth = X509_STORE_CTX_get_error_depth(store_ctx); /* If the certification was found invalid, get the error and convert it to something our caller will understand. */ if (! cert_valid) { err = X509_STORE_CTX_get_error(store_ctx); switch(err) { case X509_V_ERR_CERT_NOT_YET_VALID: failures |= SERF_SSL_CERT_NOTYETVALID; break; case X509_V_ERR_CERT_HAS_EXPIRED: failures |= SERF_SSL_CERT_EXPIRED; break; case X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT: case X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN: failures |= SERF_SSL_CERT_SELF_SIGNED; break; case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY: failures |= SERF_SSL_CERT_UNKNOWNCA; break; default: failures |= SERF_SSL_CERT_UNKNOWN_FAILURE; break; } } /* Check certificate expiry dates. */ if (X509_cmp_current_time(X509_get_notBefore(server_cert)) >= 0) { failures |= SERF_SSL_CERT_NOTYETVALID; } else if (X509_cmp_current_time(X509_get_notAfter(server_cert)) <= 0) { failures |= SERF_SSL_CERT_EXPIRED; } if (ctx->server_cert_callback && (depth == 0 || failures)) { apr_status_t status; serf_ssl_certificate_t *cert; apr_pool_t *subpool; apr_pool_create(&subpool, ctx->pool); cert = apr_palloc(subpool, sizeof(serf_ssl_certificate_t)); cert->ssl_cert = server_cert; cert->depth = depth; /* Callback for further verification. */ status = ctx->server_cert_callback(ctx->server_cert_userdata, failures, cert); if (status == APR_SUCCESS) cert_valid = 1; else /* Pass the error back to the caller through the context-run. */ ctx->pending_err = status; apr_pool_destroy(subpool); } return cert_valid; }
/** Create ASR engine */ ASR_CLIENT_DECLARE(asr_engine_t*) asr_engine_create( const char *root_dir_path, apt_log_priority_e log_priority, apt_log_output_e log_output) { apr_pool_t *pool = NULL; apt_dir_layout_t *dir_layout; asr_engine_t *engine; mrcp_client_t *mrcp_client; mrcp_application_t *mrcp_app; /* create APR pool */ pool = apt_pool_create(); if(!pool) { return NULL; } /* create the structure of default directories layout */ dir_layout = apt_default_dir_layout_create(root_dir_path,pool); /* create singleton logger */ apt_log_instance_create(log_output,log_priority,pool); if((log_output & APT_LOG_OUTPUT_FILE) == APT_LOG_OUTPUT_FILE) { /* open the log file */ apt_log_file_open(dir_layout->log_dir_path,"unimrcpclient",MAX_LOG_FILE_SIZE,MAX_LOG_FILE_COUNT,pool); } engine = apr_palloc(pool,sizeof(asr_engine_t)); engine->pool = pool; engine->mrcp_client = NULL; engine->mrcp_app = NULL; /* create UniMRCP client stack */ mrcp_client = unimrcp_client_create(dir_layout); if(!mrcp_client) { apt_log_instance_destroy(); apr_pool_destroy(pool); return NULL; } /* create an application */ mrcp_app = mrcp_application_create( app_message_handler, engine, pool); if(!mrcp_app) { mrcp_client_destroy(mrcp_client); apt_log_instance_destroy(); apr_pool_destroy(pool); return NULL; } /* register application in client stack */ mrcp_client_application_register(mrcp_client,mrcp_app,"ASRAPP"); /* start client stack */ if(mrcp_client_start(mrcp_client) != TRUE) { mrcp_client_destroy(mrcp_client); apt_log_instance_destroy(); apr_pool_destroy(pool); return NULL; } engine->mrcp_client = mrcp_client; engine->mrcp_app = mrcp_app; return engine; }
static css_stylesheet_t * s_chxj_css_parse_from_buf(request_rec *r, apr_pool_t *pool, struct css_already_import_stack *imported_stack, css_stylesheet_t *old_stylesheet, const char *css) { apr_size_t srclen; SCSSParserPtr_t parser = NULL; SCSSSACHandlerPtr_t handler = NULL; css_stylesheet_t *stylesheet = NULL; struct css_app_data app_data; struct css_already_import_stack *new_stack; DBG(r,"REQ[%X] start %s()",TO_ADDR(r),__func__); DBG(r,"REQ[%X] css:[%s]", TO_ADDR(r),css); srclen = strlen(css); /* create parser */ parser = scss_parser_new_from_buf(pool, css, ""); if (!parser) { ERR(r,"REQ[%X] %s:%d end chxj_css_parse_from_uri(): scss_parser_new_from_buf() failed", TO_ADDR(r),APLOG_MARK); return NULL; } handler = scss_doc_handler_new(parser); if (!handler) { ERR(r,"REQ[%X] %s:%d end chxj_css_parse_from_uri(): scss_doc_handler_new() failed", TO_ADDR(r),APLOG_MARK); return NULL; } stylesheet = apr_palloc(pool, sizeof(*stylesheet)); memset(stylesheet, 0, sizeof(*stylesheet)); stylesheet->selector_head.next = &stylesheet->selector_head; stylesheet->selector_head.ref = &stylesheet->selector_head.next; memset(&app_data, 0, sizeof(struct css_app_data)); app_data.stylesheet = stylesheet; app_data.selector_list = NULL; app_data.selector_count = 0; app_data.pool = pool; app_data.error_occured = 0; app_data.r = r; if (imported_stack) { s_copy_already_import_stack(pool, &app_data.imported_stack_head, imported_stack); } else { app_data.imported_stack_head.next = &app_data.imported_stack_head; app_data.imported_stack_head.ref = &app_data.imported_stack_head.next; } scss_doc_set_user_data(parser->doc, &app_data); new_stack = apr_palloc(pool, sizeof(*new_stack)); memset(new_stack, 0, sizeof(*new_stack)); new_stack->next = new_stack; new_stack->ref = &new_stack->next; new_stack->full_url = ""; list_insert(new_stack, (&app_data.imported_stack_head)); handler->startSelector = s_css_parser_from_uri_start_selector; handler->endSelector = s_css_parser_from_uri_end_selector; handler->property = s_css_parser_from_uri_property; handler->import = s_css_parser_from_uri_import_style; scss_parse_stylesheet(parser); DBG(r,"REQ[%X] css:[%s]", TO_ADDR(r),css); DBG(r,"REQ[%X] end %s()",TO_ADDR(r),__func__); return s_merge_stylesheet(pool, old_stylesheet, app_data.stylesheet); }
/* ** functions. */ int load_exif_from_memory( unsigned char **exif_data, unsigned int *exif_size, request_rec *r, const unsigned char *data, unsigned int data_len) { // scan SOI marker. if (data_len <= 2) return 0; data_len -= 2; unsigned char c1 = *data++; unsigned char c2 = *data++; if (c1 != 0xff || c2 != M_SOI) { return 0; } int num_marker = 0; unsigned char *marker_data[MAX_MARKERS]; unsigned int marker_size[MAX_MARKERS]; // scan marker. for (;;) { unsigned char c; for (;;) { c = *data++; if (data_len == 0) return 0; data_len--; if (c == 0xff) break; } for (;;) { c = *data++; if (data_len == 0) return 0; data_len--; if (c != 0xff) break; } // check marker. if (c == M_EOI || c == M_SOS || c == 0) { break; } else if (c == M_APP1 || c == M_COM) { // get segment length (big-endian, includes the two length bytes). unsigned int length; length = (*data++ << 8); length += *data++; // validate length. if (length < 2) return 0; // get app1 pointer and length. if (num_marker < MAX_MARKERS) { marker_data[num_marker] = (unsigned char *)(data - 4); marker_size[num_marker] = length + 2; num_marker++; } // skip the segment payload. if (data_len <= length) return 0; data_len -= length; data += length - 2; } else { // get segment length (big-endian, includes the two length bytes). unsigned int length; length = (*data++ << 8); length += *data++; // validate length. if (length < 2) return 0; // skip the segment payload. if (data_len <= length) return 0; data_len -= length; data += length - 2; } } // copy the collected segments. int i; unsigned int exif_size_total = 0; for (i = 0; i < num_marker; i++) { exif_size_total += marker_size[i]; } *exif_size = exif_size_total; *exif_data = apr_palloc(r->pool, exif_size_total); unsigned char *exif_data_ptr = *exif_data; for (i = 0; i < num_marker; i++) { memcpy(exif_data_ptr, marker_data[i], marker_size[i]); exif_data_ptr += marker_size[i]; } return 1; }
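/* ------------------------------------------------------------------------
 * Standalone sketch of the segment-length decode the scanner above depends
 * on: every APPn/COM marker is followed by a big-endian 16-bit length that
 * includes the two length bytes themselves. The buffer is illustrative.
 * ------------------------------------------------------------------------ */
#include <stdio.h>

int main(void)
{
    /* 0xFF 0xE1 (APP1) followed by length 0x0010 = 16, i.e. 14 payload bytes */
    const unsigned char seg[] = { 0xFF, 0xE1, 0x00, 0x10 };
    const unsigned char *data = seg + 2;     /* positioned on the length field */

    unsigned int length = (*data++ << 8);    /* high byte */
    length += *data++;                       /* low byte */

    printf("segment length = %u (payload = %u bytes)\n", length, length - 2);
    return 0;
}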
static css_stylesheet_t * s_chxj_css_parse_from_uri(request_rec *r, apr_pool_t *pool, struct css_already_import_stack *imported_stack, css_stylesheet_t *old_stylesheet, const char *uri) { SCSSParserPtr_t parser = NULL; SCSSSACHandlerPtr_t handler = NULL; char *css = NULL; char *full_url = NULL; apr_size_t srclen; css_stylesheet_t *stylesheet = NULL; struct css_already_import_stack *new_stack; struct css_app_data app_data; char *base_url; apr_size_t css_len; DBG(r,"REQ[%X] start %s()",TO_ADDR(r),__func__); DBG(r,"REQ[%X] uri:[%s]", TO_ADDR(r),uri); base_url = s_uri_to_base_url(&r->parsed_uri, pool); full_url = s_path_to_fullurl(pool, base_url, r->parsed_uri.path, uri); /* check already import */ if (imported_stack && s_is_already_imported(imported_stack, full_url)) { DBG(r,"REQ[%X] already imported:[%s]", TO_ADDR(r),full_url); DBG(r,"REQ[%X] end %s()",TO_ADDR(r),__func__); return NULL; } /* GET request */ css = chxj_serf_get(r, pool, full_url, 0, &css_len); if (css == NULL) { ERR(r,"REQ[%X] %s:%d end chxj_css_parse_from_uri(): serf_get failed: url:[%s]", TO_ADDR(r),APLOG_MARK, uri); DBG(r,"REQ[%X] end %s()",TO_ADDR(r),__func__); return NULL; } srclen = strlen(css); /* create parser */ parser = scss_parser_new_from_buf(pool, css, ""); if (!parser) { ERR(r,"REQ[%X] %s:%d end chxj_css_parse_from_uri(): cr_parser_new_from_buf() failed", TO_ADDR(r),APLOG_MARK); return NULL; } /* create handler */ handler = scss_doc_handler_new(parser); if (!handler) { ERR(r, "REQ[%X] %s:%d end chxj_css_parse_from_uri(): cr_doc_handler_new() failed", TO_ADDR(r),APLOG_MARK); return NULL; } stylesheet = apr_palloc(pool, sizeof(*stylesheet)); memset(stylesheet, 0, sizeof(*stylesheet)); stylesheet->selector_head.next = &stylesheet->selector_head; stylesheet->selector_head.ref = &stylesheet->selector_head.next; memset(&app_data, 0, sizeof(struct css_app_data)); app_data.stylesheet = stylesheet; app_data.selector_list = NULL; app_data.selector_count = 0; app_data.pool = pool; app_data.error_occured = 0; app_data.r = r; if (imported_stack) { s_copy_already_import_stack(pool, &app_data.imported_stack_head, imported_stack); } else { app_data.imported_stack_head.next = &app_data.imported_stack_head; app_data.imported_stack_head.ref = &app_data.imported_stack_head.next; } new_stack = apr_palloc(pool, sizeof(*new_stack)); memset(new_stack, 0, sizeof(*new_stack)); new_stack->next = new_stack; new_stack->ref = &new_stack->next; new_stack->full_url = full_url; list_insert(new_stack, (&app_data.imported_stack_head)); scss_doc_set_user_data(parser->doc, &app_data); handler->startSelector = s_css_parser_from_uri_start_selector; handler->endSelector = s_css_parser_from_uri_end_selector; handler->property = s_css_parser_from_uri_property; handler->import = s_css_parser_from_uri_import_style; scss_parse_stylesheet(parser); DBG(r,"REQ[%X] url:[%s]", TO_ADDR(r),uri); DBG(r,"REQ[%X] end %s()",TO_ADDR(r),__func__); return s_merge_stylesheet(pool, old_stylesheet, app_data.stylesheet); }
const char * svn_time_to_human_cstring(apr_time_t when, apr_pool_t *pool) { apr_time_exp_t exploded_time; apr_size_t len, retlen; apr_status_t ret; char *datestr, *curptr, human_datestr[SVN_TIME__MAX_LENGTH]; /* Get the time into parts */ ret = apr_time_exp_lt(&exploded_time, when); if (ret) return NULL; /* Make room for datestring */ datestr = apr_palloc(pool, SVN_TIME__MAX_LENGTH); /* Put in machine parseable part */ len = apr_snprintf(datestr, SVN_TIME__MAX_LENGTH, HUMAN_TIMESTAMP_FORMAT, exploded_time.tm_year + 1900, exploded_time.tm_mon + 1, exploded_time.tm_mday, exploded_time.tm_hour, exploded_time.tm_min, exploded_time.tm_sec, exploded_time.tm_gmtoff / (60 * 60), (abs(exploded_time.tm_gmtoff) / 60) % 60); /* If we overfilled the buffer, just return what we got. */ if (len >= SVN_TIME__MAX_LENGTH) return datestr; /* Calculate offset to the end of the machine parseable part. */ curptr = datestr + len; /* Put in human explanatory part */ ret = apr_strftime(human_datestr, &retlen, SVN_TIME__MAX_LENGTH - len, HUMAN_TIMESTAMP_FORMAT_SUFFIX, &exploded_time); /* If there was an error, ensure that the string is zero-terminated. */ if (ret || retlen == 0) *curptr = '\0'; else { const char *utf8_string; svn_error_t *err; err = svn_utf_cstring_to_utf8(&utf8_string, human_datestr, pool); if (err) { *curptr = '\0'; svn_error_clear(err); } else apr_cpystrn(curptr, utf8_string, SVN_TIME__MAX_LENGTH - len); } return datestr; }
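/* ------------------------------------------------------------------------
 * Hedged sketch of the APR calls svn_time_to_human_cstring builds on:
 * explode a timestamp into local-time parts, print a machine-parseable
 * prefix with apr_snprintf, then append a locale-formatted suffix with
 * apr_strftime. The format strings here are illustrative, not SVN's
 * HUMAN_TIMESTAMP_FORMAT macros.
 * ------------------------------------------------------------------------ */
#include <stdio.h>
#include <apr_general.h>
#include <apr_time.h>
#include <apr_strings.h>

int main(void)
{
    apr_time_exp_t exploded;
    char machine[64], human[64];
    apr_size_t retlen;

    apr_initialize();
    apr_time_exp_lt(&exploded, apr_time_now());   /* local time, as in the code above */

    apr_snprintf(machine, sizeof(machine), "%04d-%02d-%02d %02d:%02d:%02d",
                 exploded.tm_year + 1900, exploded.tm_mon + 1, exploded.tm_mday,
                 exploded.tm_hour, exploded.tm_min, exploded.tm_sec);

    apr_strftime(human, &retlen, sizeof(human), "(%a, %d %b %Y)", &exploded);

    printf("%s %s\n", machine, human);
    apr_terminate();
    return 0;
}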
apr_status_t ap_core_input_filter(ap_filter_t *f, apr_bucket_brigade *b, ap_input_mode_t mode, apr_read_type_e block, apr_off_t readbytes) { apr_status_t rv; core_net_rec *net = f->ctx; core_ctx_t *ctx = net->in_ctx; const char *str; apr_size_t len; if (mode == AP_MODE_INIT) { /* * this mode is for filters that might need to 'initialize' * a connection before reading request data from a client. * NNTP over SSL for example needs to handshake before the * server sends the welcome message. * such filters would have changed the mode before this point * is reached. however, protocol modules such as NNTP should * not need to know anything about SSL. given the example, if * SSL is not in the filter chain, AP_MODE_INIT is a noop. */ return APR_SUCCESS; } if (!ctx) { net->in_ctx = ctx = apr_palloc(f->c->pool, sizeof(*ctx)); ctx->b = apr_brigade_create(f->c->pool, f->c->bucket_alloc); ctx->tmpbb = apr_brigade_create(f->c->pool, f->c->bucket_alloc); /* seed the brigade with the client socket. */ rv = ap_run_insert_network_bucket(f->c, ctx->b, net->client_socket); if (rv != APR_SUCCESS) return rv; } else if (APR_BRIGADE_EMPTY(ctx->b)) { return APR_EOF; } /* ### This is bad. */ BRIGADE_NORMALIZE(ctx->b); /* check for empty brigade again *AFTER* BRIGADE_NORMALIZE() * If we have lost our socket bucket (see above), we are EOF. * * Ideally, this should be returning SUCCESS with EOS bucket, but * some higher-up APIs (spec. read_request_line via ap_rgetline) * want an error code. */ if (APR_BRIGADE_EMPTY(ctx->b)) { return APR_EOF; } if (mode == AP_MODE_GETLINE) { /* we are reading a single LF line, e.g. the HTTP headers */ rv = apr_brigade_split_line(b, ctx->b, block, HUGE_STRING_LEN); /* We should treat EAGAIN here the same as we do for EOF (brigade is * empty). We do this by returning whatever we have read. This may * or may not be bogus, but is consistent (for now) with EOF logic. */ if (APR_STATUS_IS_EAGAIN(rv) && block == APR_NONBLOCK_READ) { rv = APR_SUCCESS; } return rv; } /* ### AP_MODE_PEEK is a horrific name for this mode because we also * eat any CRLFs that we see. That's not the obvious intention of * this mode. Determine whether anyone actually uses this or not. */ if (mode == AP_MODE_EATCRLF) { apr_bucket *e; const char *c; /* The purpose of this loop is to ignore any CRLF (or LF) at the end * of a request. Many browsers send extra lines at the end of POST * requests. We use the PEEK method to determine if there is more * data on the socket, so that we know if we should delay sending the * end of one request until we have served the second request in a * pipelined situation. We don't want to actually delay sending a * response if the server finds a CRLF (or LF), becuause that doesn't * mean that there is another request, just a blank line. */ while (1) { if (APR_BRIGADE_EMPTY(ctx->b)) return APR_EOF; e = APR_BRIGADE_FIRST(ctx->b); rv = apr_bucket_read(e, &str, &len, APR_NONBLOCK_READ); if (rv != APR_SUCCESS) return rv; c = str; while (c < str + len) { if (*c == APR_ASCII_LF) c++; else if (*c == APR_ASCII_CR && *(c + 1) == APR_ASCII_LF) c += 2; else return APR_SUCCESS; } /* If we reach here, we were a bucket just full of CRLFs, so * just toss the bucket. */ /* FIXME: Is this the right thing to do in the core? */ apr_bucket_delete(e); } return APR_SUCCESS; } /* If mode is EXHAUSTIVE, we want to just read everything until the end * of the brigade, which in this case means the end of the socket. 
* To do this, we attach the brigade that has currently been setaside to * the brigade that was passed down, and send that brigade back. * * NOTE: This is VERY dangerous to use, and should only be done with * extreme caution. FWIW, this would be needed by an MPM like Perchild; * such an MPM can easily request the socket and all data that has been * read, which means that it can pass it to the correct child process. */ if (mode == AP_MODE_EXHAUSTIVE) { apr_bucket *e; /* Tack on any buckets that were set aside. */ APR_BRIGADE_CONCAT(b, ctx->b); /* Since we've just added all potential buckets (which will most * likely simply be the socket bucket) we know this is the end, * so tack on an EOS too. */ /* We have read until the brigade was empty, so we know that we * must be EOS. */ e = apr_bucket_eos_create(f->c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(b, e); return APR_SUCCESS; } /* read up to the amount they specified. */ if (mode == AP_MODE_READBYTES || mode == AP_MODE_SPECULATIVE) { apr_bucket *e; AP_DEBUG_ASSERT(readbytes > 0); e = APR_BRIGADE_FIRST(ctx->b); rv = apr_bucket_read(e, &str, &len, block); if (APR_STATUS_IS_EAGAIN(rv) && block == APR_NONBLOCK_READ) { /* getting EAGAIN for a blocking read is an error; for a * non-blocking read, return an empty brigade. */ return APR_SUCCESS; } else if (rv != APR_SUCCESS) { return rv; } else if (block == APR_BLOCK_READ && len == 0) { /* We wanted to read some bytes in blocking mode. We read * 0 bytes. Hence, we now assume we are EOS. * * When we are in normal mode, return an EOS bucket to the * caller. * When we are in speculative mode, leave ctx->b empty, so * that the next call returns an EOS bucket. */ apr_bucket_delete(e); if (mode == AP_MODE_READBYTES) { e = apr_bucket_eos_create(f->c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(b, e); } return APR_SUCCESS; } /* Have we read as much data as we wanted (be greedy)? */ if (len < readbytes) { apr_size_t bucket_len; rv = APR_SUCCESS; /* We already registered the data in e in len */ e = APR_BUCKET_NEXT(e); while ((len < readbytes) && (rv == APR_SUCCESS) && (e != APR_BRIGADE_SENTINEL(ctx->b))) { /* Check for the availability of buckets with known length */ if (e->length != -1) { len += e->length; e = APR_BUCKET_NEXT(e); } else { /* * Read from bucket, but non blocking. If there isn't any * more data, well then this is fine as well, we will * not wait for more since we already got some and we are * only checking if there isn't more. */ rv = apr_bucket_read(e, &str, &bucket_len, APR_NONBLOCK_READ); if (rv == APR_SUCCESS) { len += bucket_len; e = APR_BUCKET_NEXT(e); } } } } /* We can only return at most what we read. */ if (len < readbytes) { readbytes = len; } rv = apr_brigade_partition(ctx->b, readbytes, &e); if (rv != APR_SUCCESS) { return rv; } /* Must do move before CONCAT */ ctx->tmpbb = apr_brigade_split_ex(ctx->b, e, ctx->tmpbb); if (mode == AP_MODE_READBYTES) { APR_BRIGADE_CONCAT(b, ctx->b); } else if (mode == AP_MODE_SPECULATIVE) { apr_bucket *copy_bucket; for (e = APR_BRIGADE_FIRST(ctx->b); e != APR_BRIGADE_SENTINEL(ctx->b); e = APR_BUCKET_NEXT(e)) { rv = apr_bucket_copy(e, &copy_bucket); if (rv != APR_SUCCESS) { return rv; } APR_BRIGADE_INSERT_TAIL(b, copy_bucket); } } /* Take what was originally there and place it back on ctx->b */ APR_BRIGADE_CONCAT(ctx->b, ctx->tmpbb); } return APR_SUCCESS; }
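/* ------------------------------------------------------------------------
 * The core input filter above deals purely in bucket brigades; this is a
 * hedged standalone sketch of the brigade primitives it relies on (apr-util
 * types, no connection or filter chain involved).
 * ------------------------------------------------------------------------ */
#include <stdio.h>
#include <apr_general.h>
#include <apr_buckets.h>

int main(void)
{
    apr_pool_t *pool;
    apr_bucket_alloc_t *ba;
    apr_bucket_brigade *bb;
    apr_bucket *b;
    char *flat;
    apr_size_t len;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    ba = apr_bucket_alloc_create(pool);
    bb = apr_brigade_create(pool, ba);

    /* a data bucket followed by EOS, the shape READBYTES mode hands upstream */
    b = apr_bucket_transient_create("GET / HTTP/1.0\r\n", 16, ba);
    APR_BRIGADE_INSERT_TAIL(bb, b);
    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(ba));

    /* flatten the whole brigade into one pool-allocated buffer */
    apr_brigade_pflatten(bb, &flat, &len, pool);
    printf("%.*s", (int)len, flat);

    apr_brigade_destroy(bb);
    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}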
} dir->dirhand = INVALID_HANDLE_VALUE; return APR_SUCCESS; } APR_DECLARE(apr_status_t) apr_dir_open(apr_dir_t **new, const char *dirname, apr_pool_t *pool) { apr_status_t rv; apr_size_t len = strlen(dirname); (*new) = apr_pcalloc(pool, sizeof(apr_dir_t)); /* Leave room here to add and pop the '*' wildcard for FindFirstFile * and double-null terminate so we have one character to change. */ (*new)->dirname = apr_palloc(pool, len + 3); memcpy((*new)->dirname, dirname, len); if (len && (*new)->dirname[len - 1] != '/') { (*new)->dirname[len++] = '/'; } (*new)->dirname[len++] = '\0'; (*new)->dirname[len] = '\0'; #if APR_HAS_UNICODE_FS IF_WIN_OS_IS_UNICODE { /* Create a buffer for the longest file name we will ever see */ (*new)->w.entry = apr_pcalloc(pool, sizeof(WIN32_FIND_DATAW)); (*new)->name = apr_pcalloc(pool, APR_FILE_MAX * 3 + 1); }
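/* ------------------------------------------------------------------------
 * Caller-side sketch of the portable directory API whose Win32 open routine
 * is excerpted above: apr_dir_open, apr_dir_read, apr_dir_close. Assumes
 * "." is a readable directory; error handling kept minimal.
 * ------------------------------------------------------------------------ */
#include <stdio.h>
#include <apr_general.h>
#include <apr_file_info.h>

int main(void)
{
    apr_pool_t *pool;
    apr_dir_t *dir;
    apr_finfo_t finfo;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    if (apr_dir_open(&dir, ".", pool) == APR_SUCCESS) {
        /* APR_FINFO_NAME requests only the entry name; the loop ends when
         * apr_dir_read stops returning APR_SUCCESS (end of directory). */
        while (apr_dir_read(&finfo, APR_FINFO_NAME, dir) == APR_SUCCESS)
            printf("%s\n", finfo.name);
        apr_dir_close(dir);
    }

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}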
/** Create ASR session */ ASR_CLIENT_DECLARE(asr_session_t*) asr_session_create(asr_engine_t *engine, const char *profile) { mpf_termination_t *termination; mrcp_channel_t *channel; mrcp_session_t *session; const mrcp_app_message_t *app_message; apr_pool_t *pool; asr_session_t *asr_session; mpf_stream_capabilities_t *capabilities; /* create session */ session = mrcp_application_session_create(engine->mrcp_app,profile,NULL); if(!session) { return NULL; } pool = mrcp_application_session_pool_get(session); asr_session = apr_palloc(pool,sizeof(asr_session_t)); mrcp_application_session_object_set(session,asr_session); /* create source stream capabilities */ capabilities = mpf_source_stream_capabilities_create(pool); /* add codec capabilities (Linear PCM) */ mpf_codec_capabilities_add( &capabilities->codecs, MPF_SAMPLE_RATE_8000, "LPCM"); termination = mrcp_application_audio_termination_create( session, /* session, termination belongs to */ &audio_stream_vtable, /* virtual methods table of audio stream */ capabilities, /* capabilities of audio stream */ asr_session); /* object to associate */ channel = mrcp_application_channel_create( session, /* session, channel belongs to */ MRCP_RECOGNIZER_RESOURCE, /* MRCP resource identifier */ termination, /* media termination, used to terminate audio stream */ NULL, /* RTP descriptor, used to create RTP termination (NULL by default) */ asr_session); /* object to associate */ if(!channel) { mrcp_application_session_destroy(session); return NULL; } asr_session->engine = engine; asr_session->mrcp_session = session; asr_session->mrcp_channel = channel; asr_session->recog_complete = NULL; asr_session->input_mode = INPUT_MODE_NONE; asr_session->streaming = FALSE; asr_session->audio_in = NULL; asr_session->media_buffer = NULL; asr_session->mutex = NULL; asr_session->wait_object = NULL; asr_session->app_message = NULL; /* Create cond wait object and mutex */ apr_thread_mutex_create(&asr_session->mutex,APR_THREAD_MUTEX_DEFAULT,pool); apr_thread_cond_create(&asr_session->wait_object,pool); /* Create media buffer */ asr_session->media_buffer = mpf_frame_buffer_create(160,20,pool); /* Send add channel request and wait for the response */ apr_thread_mutex_lock(asr_session->mutex); app_message = NULL; if(mrcp_application_channel_add(asr_session->mrcp_session,asr_session->mrcp_channel) == TRUE) { apr_thread_cond_wait(asr_session->wait_object,asr_session->mutex); app_message = asr_session->app_message; asr_session->app_message = NULL; } apr_thread_mutex_unlock(asr_session->mutex); if(sig_response_check(app_message) == FALSE) { asr_session_destroy_ex(asr_session,TRUE); return NULL; } return asr_session; }
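/* ------------------------------------------------------------------------
 * Hedged usage sketch tying the two constructors above together. The
 * "asr_engine.h" header name, the "/usr/local/unimrcp" root directory, the
 * "uni2" profile id and the asr_session_destroy()/asr_engine_destroy()
 * cleanup calls are assumptions, not confirmed by the excerpt.
 * ------------------------------------------------------------------------ */
#include <stdio.h>
#include "asr_engine.h"

int main(void)
{
    asr_engine_t *engine;
    asr_session_t *session;

    engine = asr_engine_create("/usr/local/unimrcp",
                               APT_PRIO_INFO, APT_LOG_OUTPUT_CONSOLE);
    if (!engine) {
        fprintf(stderr, "failed to create ASR engine\n");
        return 1;
    }

    /* blocks until the channel-add response arrives, as shown above */
    session = asr_session_create(engine, "uni2");
    if (session) {
        /* ... drive recognition here ... */
        asr_session_destroy(session);   /* assumed counterpart of asr_session_create */
    }

    asr_engine_destroy(engine);         /* assumed counterpart of asr_engine_create */
    return 0;
}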
static apr_status_t rate_limit_filter(ap_filter_t *f, apr_bucket_brigade *bb) { apr_status_t rv = APR_SUCCESS; rl_ctx_t *ctx = f->ctx; apr_bucket_alloc_t *ba = f->r->connection->bucket_alloc; /* Set up our rl_ctx_t on first use */ if (ctx == NULL) { const char *rl = NULL; int ratelimit; int burst = 0; /* no subrequests. */ if (f->r->main != NULL) { ap_remove_output_filter(f); return ap_pass_brigade(f->next, bb); } /* Configuration: rate limit */ rl = apr_table_get(f->r->subprocess_env, "rate-limit"); if (rl == NULL) { ap_remove_output_filter(f); return ap_pass_brigade(f->next, bb); } /* rl is in kilo bytes / second */ ratelimit = atoi(rl) * 1024; if (ratelimit <= 0) { /* remove ourselves */ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(03488) "rl: disabling: rate-limit = %s (too high?)", rl); ap_remove_output_filter(f); return ap_pass_brigade(f->next, bb); } /* Configuration: optional initial burst */ rl = apr_table_get(f->r->subprocess_env, "rate-initial-burst"); if (rl != NULL) { burst = atoi(rl) * 1024; if (burst <= 0) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(03489) "rl: disabling burst: rate-initial-burst = %s (too high?)", rl); burst = 0; } } /* Set up our context */ ctx = apr_palloc(f->r->pool, sizeof(rl_ctx_t)); f->ctx = ctx; ctx->state = RATE_LIMIT; ctx->speed = ratelimit; ctx->burst = burst; ctx->do_sleep = 0; /* calculate how many bytes / interval we want to send */ /* speed is bytes / second, so, how many (speed / 1000 % interval) */ ctx->chunk_size = (ctx->speed / (1000 / RATE_INTERVAL_MS)); ctx->tmpbb = apr_brigade_create(f->r->pool, ba); ctx->holdingbb = apr_brigade_create(f->r->pool, ba); } else { APR_BRIGADE_PREPEND(bb, ctx->holdingbb); } while (!APR_BRIGADE_EMPTY(bb)) { apr_bucket *e; if (ctx->state == RATE_FULLSPEED) { /* Find where we 'stop' going full speed. */ for (e = APR_BRIGADE_FIRST(bb); e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) { if (AP_RL_BUCKET_IS_END(e)) { apr_brigade_split_ex(bb, e, ctx->holdingbb); ctx->state = RATE_LIMIT; break; } } e = apr_bucket_flush_create(ba); APR_BRIGADE_INSERT_TAIL(bb, e); rv = ap_pass_brigade(f->next, bb); apr_brigade_cleanup(bb); if (rv != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r, APLOGNO(01455) "rl: full speed brigade pass failed."); return rv; } } else { for (e = APR_BRIGADE_FIRST(bb); e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) { if (AP_RL_BUCKET_IS_START(e)) { apr_brigade_split_ex(bb, e, ctx->holdingbb); ctx->state = RATE_FULLSPEED; break; } } while (!APR_BRIGADE_EMPTY(bb)) { apr_off_t len = ctx->chunk_size + ctx->burst; APR_BRIGADE_CONCAT(ctx->tmpbb, bb); /* * Pull next chunk of data; the initial amount is our * burst allotment (if any) plus a chunk. All subsequent * iterations are just chunks with whatever remaining * burst amounts we have left (in case not done in the * first bucket). */ rv = apr_brigade_partition(ctx->tmpbb, len, &e); if (rv != APR_SUCCESS && rv != APR_INCOMPLETE) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r, APLOGNO(01456) "rl: partition failed."); return rv; } /* Send next metadata now if any */ while (e != APR_BRIGADE_SENTINEL(ctx->tmpbb) && APR_BUCKET_IS_METADATA(e)) { e = APR_BUCKET_NEXT(e); } if (e != APR_BRIGADE_SENTINEL(ctx->tmpbb)) { apr_brigade_split_ex(ctx->tmpbb, e, bb); } else { apr_brigade_length(ctx->tmpbb, 1, &len); } /* * Adjust the burst amount depending on how much * we've done up to now. 
*/ if (ctx->burst) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r, APLOGNO(03485) "rl: burst %d; len %"APR_OFF_T_FMT, ctx->burst, len); if (len < ctx->burst) { ctx->burst -= len; } else { ctx->burst = 0; } } e = APR_BRIGADE_LAST(ctx->tmpbb); if (APR_BUCKET_IS_EOS(e)) { ap_remove_output_filter(f); } else if (!APR_BUCKET_IS_FLUSH(e)) { if (APR_BRIGADE_EMPTY(bb)) { /* Wait for more (or next call) */ break; } e = apr_bucket_flush_create(ba); APR_BRIGADE_INSERT_TAIL(ctx->tmpbb, e); } #if defined(RLFDEBUG) brigade_dump(f->r, ctx->tmpbb); brigade_dump(f->r, bb); #endif /* RLFDEBUG */ if (ctx->do_sleep) { apr_sleep(RATE_INTERVAL_MS * 1000); } else { ctx->do_sleep = 1; } rv = ap_pass_brigade(f->next, ctx->tmpbb); apr_brigade_cleanup(ctx->tmpbb); if (rv != APR_SUCCESS) { /* Most often, user disconnects from stream */ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r, APLOGNO(01457) "rl: brigade pass failed."); return rv; } } } if (!APR_BRIGADE_EMPTY(ctx->holdingbb)) { /* Any rate-limited data in tmpbb is sent unlimited along * with the rest. */ APR_BRIGADE_CONCAT(bb, ctx->tmpbb); APR_BRIGADE_CONCAT(bb, ctx->holdingbb); } } #if defined(RLFDEBUG) brigade_dump(f->r, ctx->tmpbb); #endif /* RLFDEBUG */ /* Save remaining tmpbb with the correct lifetime for the next call */ return ap_save_brigade(f, &ctx->holdingbb, &ctx->tmpbb, f->r->pool); }
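To put numbers on the chunk calculation above: stock mod_ratelimit is typically enabled with "SetOutputFilter RATE_LIMIT" plus "SetEnv rate-limit <KiB/s>" (and optionally "rate-initial-burst"). Assuming RATE_INTERVAL_MS is 200 ms, a rate-limit of 400 works out as in this sketch; the numbers are hypothetical.

/* Hypothetical numbers only, assuming RATE_INTERVAL_MS == 200. */
static void chunk_size_example(void)
{
    int speed      = 400 * 1024;          /* "SetEnv rate-limit 400" => 409600 B/s */
    int intervals  = 1000 / 200;          /* 5 sleep intervals per second          */
    int chunk_size = speed / intervals;   /* 81920 bytes sent per interval         */
    (void)chunk_size;
}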
void ConsoleUI::handleUsageHelp(const char * cpszUsage, const char * cpszHelp, const char * cpszHelpFlags) { std::vector<const char *> opts; // Vector of possible option (start points) unsigned int i; apr_status_t rv; unsigned int nHelpFlags; if (cpszHelpFlags == NULL) cpszHelpFlags = "[--help | -?]\t\n"; iv_cpszUsage[0] = cpszHelpFlags; iv_cpszUsage[1] = cpszUsage; iv_cpszHelp = cpszHelp; // Step thru the usage msg to and create a vector addressing the start of each option // Options can be switches or take a value, and be marked as optional (most options are // are optional [!] but taftest_app requires one and only one of --config & --configs) // --opt --opt <val> --opt=<val> // [--opt] [--opt <val>] [--opt=<val>] [-c] [-f foo] [-x | -y] // Assume a long option starts with -- preceeded by [ or whitespace // Assume a short option starts with - preceeded by [ or space and is followed by ] or space // First process help flags then the remainder of the usage message. for ( i = 0; i < 2; ++i ) { const char * str = iv_cpszUsage[i]; if ( *str == '-' && *++str == '-' ) // Check for initial -- where str[-1] would be bad! opts.push_back(str); while ((str = strchr(str+1, '-')) != NULL) { if ( ( str[1]=='-' && (str[-1]=='[' || isspace(str[-1])) ) || ( str[1]!='-' && isgraph(str[1]) && (str[-1]=='[' || str[-1]==' ') && (str[2]==']' || str[2]==' ') ) ) opts.push_back(++str); } if (i == 0) nHelpFlags = opts.size(); } iv_pOpts = (apr_getopt_option_t *)apr_palloc( consPool, (opts.size()+1) * sizeof(apr_getopt_option_t) ); // Process short and long options in two passes as must use a wierd value for optch to // locate long options, so can have no more than 47 long options (1 - '0') int j = 0; const char * opt, * str; int k = 0; char helpChars[20]; for ( i = 0; i < opts.size(); ++i ) { opt = opts[i]; if ( *opt == '-' ) { // Long option str = strpbrk( ++opt, "]= \n" ); if (str == NULL) // Must be last option! str = opt + strlen(opt); iv_pOpts[j].name = apr_pstrndup(consPool, opt, str - opt); iv_pOpts[j].has_arg = (*str == '=' || (*str == ' ' && *(str+1) == '<')); iv_pOpts[j].optch = j+1; // Use 1-based index as "short" option if (i < nHelpFlags) helpChars[k++] = j+1; ++j; } } iv_maxOptIndex = j; // Largest wierd value marking a long option // NOTE: should check that this is < any alphanumeric char, i.e. < 48 ('0') for ( i = 0; i < opts.size(); ++i ) { opt = opts[i]; if ( *opt != '-' ) { // Short option iv_pOpts[j].optch = *opt; // Use real character iv_pOpts[j].name = NULL; iv_pOpts[j++].has_arg = (opt[1] == ' ' && opt[2] != '|' ); if (i < nHelpFlags) helpChars[k++] = *opt; } } iv_pOpts[j].optch = 0; // Mark end of list helpChars[k] = '\0'; #ifndef NDEBUG // Dump all legal options when called with just 1 arg of "--" if ( iv_argc == 2 && strcmp(iv_argv[1],"--")==0 ) debugDisplayOptions(j); #endif // Set up processing of interleaved options and arguments apr_getopt_init(&iv_hGetopt, consPool, iv_argc, iv_argv); iv_hGetopt->interleave = 1; // Can't have more options than args! iv_pFoundOpts = (found_opt_t *)apr_palloc(consPool, iv_argc * sizeof(found_opt_t)); iv_nFoundOpts = 0; // Check that provided options are valid and save them. 
int index; const char * optarg; bool needHelp = false; while ((rv = apr_getopt_long(iv_hGetopt, iv_pOpts, &index, &optarg)) == APR_SUCCESS) { iv_pFoundOpts[iv_nFoundOpts].index = index; iv_pFoundOpts[iv_nFoundOpts++].value = optarg; if (strchr(helpChars,index) != NULL) needHelp = true; } // The two possible errors are: invalid option & missing argument. // getopt prints these on the error stream with fprintf(stderr, ...) but this can be overridden // via iv_hGetopt->errfn, a typedef void (apr_getopt_err_fn_t)(void *arg, const char *err, ...) if ( rv != APR_EOF ) displayUsage(); // Args are valid ... now check for the help flags else if (needHelp) displayHelp(); }
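For reference, the same apr_getopt_long() loop with a fixed option table instead of one reverse-engineered from a usage string; the option names here are examples only.

#include "apr_getopt.h"

static const apr_getopt_option_t example_opts[] = {
    { "config",  'c', 1, "configuration file" },   /* --config <file> / -c <file> */
    { "verbose", 'v', 0, "verbose output" },       /* --verbose / -v              */
    { NULL, 0, 0, NULL }                           /* terminator                  */
};

/* Sketch: iterate the options; apr_getopt_long() returns APR_EOF when done,
 * APR_BADCH / APR_BADARG on an unknown option or a missing argument. */
static apr_status_t parse_example_args(int argc, const char * const *argv,
                                       apr_pool_t *pool)
{
    apr_getopt_t *os;
    int optch;
    const char *optval;
    apr_status_t rv;

    apr_getopt_init(&os, pool, argc, argv);
    while ((rv = apr_getopt_long(os, example_opts, &optch, &optval)) == APR_SUCCESS) {
        /* optch is 'c' or 'v' here; optval holds the argument for --config */
    }
    return (rv == APR_EOF) ? APR_SUCCESS : rv;
}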
apr_status_t apr_file_cleanup(void *thefile) { apr_file_t *file = thefile; return apr_file_close(file); } APR_DECLARE(apr_status_t) apr_file_open(apr_file_t **new, const char *fname, apr_int32_t flag, apr_fileperms_t perm, apr_pool_t *pool) { int oflags = 0; int mflags = OPEN_FLAGS_FAIL_ON_ERROR|OPEN_SHARE_DENYNONE|OPEN_FLAGS_NOINHERIT; int rv; ULONG action; apr_file_t *dafile = (apr_file_t *)apr_palloc(pool, sizeof(apr_file_t)); dafile->pool = pool; dafile->isopen = FALSE; dafile->eof_hit = FALSE; dafile->buffer = NULL; dafile->flags = flag; dafile->blocking = BLK_ON; if ((flag & APR_READ) && (flag & APR_WRITE)) { mflags |= OPEN_ACCESS_READWRITE; } else if (flag & APR_READ) { mflags |= OPEN_ACCESS_READONLY; } else if (flag & APR_WRITE) { mflags |= OPEN_ACCESS_WRITEONLY; } else {
svn_diff__lcs_t * svn_diff__lcs(svn_diff__position_t *position_list1, /* pointer to tail (ring) */ svn_diff__position_t *position_list2, /* pointer to tail (ring) */ apr_pool_t *pool) { int idx; apr_off_t length[2]; svn_diff__snake_t *fp; apr_off_t d; apr_off_t k; apr_off_t p = 0; svn_diff__lcs_t *lcs, *lcs_freelist = NULL; svn_diff__position_t sentinel_position[2]; /* Since EOF is always a sync point we tack on an EOF link * with sentinel positions */ lcs = apr_palloc(pool, sizeof(*lcs)); lcs->position[0] = apr_pcalloc(pool, sizeof(*lcs->position[0])); lcs->position[0]->offset = position_list1 ? position_list1->offset + 1 : 1; lcs->position[1] = apr_pcalloc(pool, sizeof(*lcs->position[1])); lcs->position[1]->offset = position_list2 ? position_list2->offset + 1 : 1; lcs->length = 0; lcs->refcount = 1; lcs->next = NULL; if (position_list1 == NULL || position_list2 == NULL) return lcs; /* Calculate length of both sequences to be compared */ length[0] = position_list1->offset - position_list1->next->offset + 1; length[1] = position_list2->offset - position_list2->next->offset + 1; idx = length[0] > length[1] ? 1 : 0; /* strikerXXX: here we allocate the furthest point array, which is * strikerXXX: sized M + N + 3 (!) */ fp = apr_pcalloc(pool, sizeof(*fp) * (apr_size_t)(length[0] + length[1] + 3)); fp += length[idx] + 1; sentinel_position[idx].next = position_list1->next; position_list1->next = &sentinel_position[idx]; sentinel_position[idx].offset = position_list1->offset + 1; sentinel_position[abs(1 - idx)].next = position_list2->next; position_list2->next = &sentinel_position[abs(1 - idx)]; sentinel_position[abs(1 - idx)].offset = position_list2->offset + 1; /* These are never dereferenced, only compared by value, so we * can safely fake these up and the void* cast is OK. */ sentinel_position[0].node = (void*)&sentinel_position[0]; sentinel_position[1].node = (void*)&sentinel_position[1]; d = length[abs(1 - idx)] - length[idx]; /* k = -1 will be the first to be used to get previous * position information from, make sure it holds sane * data */ fp[-1].position[0] = sentinel_position[0].next; fp[-1].position[1] = &sentinel_position[1]; p = 0; do { /* Forward */ for (k = -p; k < d; k++) { svn_diff__snake(k, fp, idx, &lcs_freelist, pool); } for (k = d + p; k >= d; k--) { svn_diff__snake(k, fp, idx, &lcs_freelist, pool); } p++; } while (fp[d].position[1] != &sentinel_position[1]); lcs->next = fp[d].lcs; lcs = svn_diff__lcs_reverse(lcs); position_list1->next = sentinel_position[idx].next; position_list2->next = sentinel_position[abs(1 - idx)].next; return lcs; }
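The p/d loop above appears to follow the Wu–Manber–Myers–Miller O(NP) scheme (an assumption; the code does not name its source). In that formulation, for sequence lengths N >= M,

\Delta = N - M, \qquad D = \Delta + 2P,

where D is the length of a shortest edit script and P the number of deletions in it. The do/while loop increments p (a candidate for P) until the furthest-reaching point on diagonal d = \Delta reaches the sentinel positions, which is what gives the expected O(NP) amount of snake extension work.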
APR_DECLARE(apr_status_t) apr_file_pipe_create(apr_file_t **in, apr_file_t **out, apr_pool_t *pool) { ULONG filedes[2]; ULONG rc, action; static int id = 0; char pipename[50]; sprintf(pipename, "/pipe/%d.%d", getpid(), id++); rc = DosCreateNPipe(pipename, filedes, NP_ACCESS_INBOUND, NP_NOWAIT|1, 4096, 4096, 0); if (rc) return APR_FROM_OS_ERROR(rc); rc = DosConnectNPipe(filedes[0]); if (rc && rc != ERROR_PIPE_NOT_CONNECTED) { DosClose(filedes[0]); return APR_FROM_OS_ERROR(rc); } rc = DosOpen (pipename, filedes+1, &action, 0, FILE_NORMAL, OPEN_ACTION_OPEN_IF_EXISTS | OPEN_ACTION_FAIL_IF_NEW, OPEN_ACCESS_WRITEONLY | OPEN_SHARE_DENYREADWRITE, NULL); if (rc) { DosClose(filedes[0]); return APR_FROM_OS_ERROR(rc); } (*in) = (apr_file_t *)apr_palloc(pool, sizeof(apr_file_t)); rc = DosCreateEventSem(NULL, &(*in)->pipeSem, DC_SEM_SHARED, FALSE); if (rc) { DosClose(filedes[0]); DosClose(filedes[1]); return APR_FROM_OS_ERROR(rc); } rc = DosSetNPipeSem(filedes[0], (HSEM)(*in)->pipeSem, 1); if (!rc) { rc = DosSetNPHState(filedes[0], NP_WAIT); } if (rc) { DosClose(filedes[0]); DosClose(filedes[1]); DosCloseEventSem((*in)->pipeSem); return APR_FROM_OS_ERROR(rc); } (*in)->pool = pool; (*in)->filedes = filedes[0]; (*in)->fname = apr_pstrdup(pool, pipename); (*in)->isopen = TRUE; (*in)->buffered = FALSE; (*in)->flags = APR_FOPEN_READ; (*in)->pipe = 1; (*in)->timeout = -1; (*in)->blocking = BLK_ON; (*in)->ungetchar = -1; apr_pool_cleanup_register(pool, *in, apr_file_cleanup, apr_pool_cleanup_null); (*out) = (apr_file_t *)apr_palloc(pool, sizeof(apr_file_t)); rc = DosCreateEventSem(NULL, &(*out)->pipeSem, DC_SEM_SHARED, FALSE); if (rc) { DosClose(filedes[0]); DosClose(filedes[1]); DosCloseEventSem((*in)->pipeSem); return APR_FROM_OS_ERROR(rc); } rc = DosSetNPipeSem(filedes[1], (HSEM)(*out)->pipeSem, 1); if (rc) { DosClose(filedes[0]); DosClose(filedes[1]); DosCloseEventSem((*in)->pipeSem); DosCloseEventSem((*out)->pipeSem); return APR_FROM_OS_ERROR(rc); } (*out)->pool = pool; (*out)->filedes = filedes[1]; (*out)->fname = apr_pstrdup(pool, pipename); (*out)->isopen = TRUE; (*out)->buffered = FALSE; (*out)->flags = APR_FOPEN_WRITE; (*out)->pipe = 1; (*out)->timeout = -1; (*out)->blocking = BLK_ON; (*out)->ungetchar = -1; apr_pool_cleanup_register(pool, *out, apr_file_cleanup, apr_pool_cleanup_null); return APR_SUCCESS; }
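Portable caller-side usage of the pipe pair created above; error handling is abbreviated and the helper name is illustrative.

#include <string.h>
#include "apr_file_io.h"

/* Sketch: create an anonymous pipe and push a few bytes through it. */
static apr_status_t pipe_roundtrip(apr_pool_t *pool)
{
    apr_file_t *rd, *wr;
    char buf[16];
    apr_size_t n;
    apr_status_t rv;

    rv = apr_file_pipe_create(&rd, &wr, pool);
    if (rv != APR_SUCCESS)
        return rv;

    n = 4;
    apr_file_write(wr, "ping", &n);       /* n is updated to the bytes written */

    n = sizeof(buf);
    rv = apr_file_read(rd, buf, &n);      /* n is updated to the bytes read    */

    apr_file_close(wr);
    apr_file_close(rd);
    return rv;
}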
static APR_INLINE void svn_diff__snake(apr_off_t k, svn_diff__snake_t *fp, int idx, svn_diff__lcs_t **freelist, apr_pool_t *pool) { svn_diff__position_t *start_position[2]; svn_diff__position_t *position[2]; svn_diff__lcs_t *lcs; svn_diff__lcs_t *previous_lcs; /* The previous entry at fp[k] is going to be replaced. See if we * can mark that lcs node for reuse, because the sequence up to this * point was a dead end. */ lcs = fp[k].lcs; while (lcs) { lcs->refcount--; if (lcs->refcount) break; previous_lcs = lcs->next; lcs->next = *freelist; *freelist = lcs; lcs = previous_lcs; } if (fp[k - 1].y + 1 > fp[k + 1].y) { start_position[0] = fp[k - 1].position[0]; start_position[1] = fp[k - 1].position[1]->next; previous_lcs = fp[k - 1].lcs; } else { start_position[0] = fp[k + 1].position[0]->next; start_position[1] = fp[k + 1].position[1]; previous_lcs = fp[k + 1].lcs; } /* ### Optimization, skip all positions that don't have matchpoints * ### anyway. Beware of the sentinel, don't skip it! */ position[0] = start_position[0]; position[1] = start_position[1]; while (position[0]->node == position[1]->node) { position[0] = position[0]->next; position[1] = position[1]->next; } if (position[1] != start_position[1]) { lcs = *freelist; if (lcs) { *freelist = lcs->next; } else { lcs = apr_palloc(pool, sizeof(*lcs)); } lcs->position[idx] = start_position[0]; lcs->position[abs(1 - idx)] = start_position[1]; lcs->length = position[1]->offset - start_position[1]->offset; lcs->next = previous_lcs; lcs->refcount = 1; fp[k].lcs = lcs; } else { fp[k].lcs = previous_lcs; } if (previous_lcs) { previous_lcs->refcount++; } fp[k].position[0] = position[0]; fp[k].position[1] = position[1]; fp[k].y = position[1]->offset; }
static char *get_pg_grp(request_rec * r, char *group, char *user, pg_auth_config_rec * sec) { char query[MAX_STRING_LEN]; char *safe_user; char *safe_group; int n; safe_user = apr_palloc(r->pool, 1 + 2 * strlen(user)); safe_group = apr_palloc(r->pool, 1 + 2 * strlen(group)); #ifdef DEBUG_AUTH_PGSQL ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, "[mod_auth_pgsql.c] - get_pg_grp - going to retrieve group for user \"%s\" from database", user); #endif /* DEBUG_AUTH_PGSQL */ query[0] = '\0'; pg_check_string(safe_user, user, strlen(user)); pg_check_string(safe_group, group, strlen(group)); if ((!sec->auth_pg_grp_table) || (!sec->auth_pg_grp_group_field) || (!sec->auth_pg_grp_user_field)) { apr_snprintf(pg_errstr, MAX_STRING_LEN, "PG: Missing parameters for password lookup: %s%s%s", (sec->auth_pg_grp_table ? "" : "Group table name"), (sec-> auth_pg_grp_group_field ? "" : "GroupID field name "), (sec-> auth_pg_grp_user_field ? "" : "Group table user field name ")); return NULL; }; if (sec->auth_pg_lowercaseuid) { /* and force it to lowercase */ n = 0; while (safe_user[n] && n < (MAX_STRING_LEN - 1)) { if (isupper(safe_user[n])) { safe_user[n] = tolower(safe_user[n]); } n++; } } if (sec->auth_pg_uppercaseuid) { /* and force it to uppercase */ n = 0; while (safe_user[n] && n < (MAX_STRING_LEN - 1)) { if (islower(safe_user[n])) { safe_user[n] = toupper(safe_user[n]); } n++; } } n = apr_snprintf(query, MAX_STRING_LEN, "select %s from %s where %s='%s' and %s='%s' %s", sec->auth_pg_grp_group_field, sec->auth_pg_grp_table, sec->auth_pg_grp_user_field, safe_user, sec->auth_pg_grp_group_field, safe_group, sec->auth_pg_grp_whereclause ? sec-> auth_pg_grp_whereclause : ""); if (n < 0 || n > MAX_STRING_LEN) { apr_snprintf(pg_errstr, MAX_STRING_LEN, "PG: Detected SQL-truncation attack. Auth aborted."); return NULL; } return do_pg_query(r, query, sec); }
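The 1 + 2*strlen() allocations above leave room for the worst case in which every input character must be escaped. pg_check_string() itself is defined elsewhere in the module; the following is a hypothetical escaper in that style, for illustration only.

/* Sketch: copy src into dst, doubling single quotes for SQL string literals.
 * dst must have room for 2*strlen(src) + 1 bytes. */
static void escape_quotes(char *dst, const char *src)
{
    while (*src) {
        if (*src == '\'')
            *dst++ = '\'';
        *dst++ = *src++;
    }
    *dst = '\0';
}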
static const char *filter_chain(cmd_parms *cmd, void *CFG, const char *arg) { mod_filter_chain *p; mod_filter_chain *q; mod_filter_cfg *cfg = CFG; switch (arg[0]) { case '+': /* add to end of chain */ p = apr_pcalloc(cmd->pool, sizeof(mod_filter_chain)); p->fname = arg+1; if (cfg->chain) { for (q = cfg->chain; q->next; q = q->next); q->next = p; } else { cfg->chain = p; } break; case '@': /* add to start of chain */ p = apr_palloc(cmd->pool, sizeof(mod_filter_chain)); p->fname = arg+1; p->next = cfg->chain; cfg->chain = p; break; case '-': /* remove from chain */ if (cfg->chain) { if (strcasecmp(cfg->chain->fname, arg+1)) { for (p = cfg->chain; p->next; p = p->next) { if (!strcasecmp(p->next->fname, arg+1)) { p->next = p->next->next; } } } else { cfg->chain = cfg->chain->next; } } break; case '!': /* Empty the chain */ /** IG: Add a NULL provider to the beginning so that * we can ensure that we'll empty everything before * this when doing config merges later */ p = apr_pcalloc(cmd->pool, sizeof(mod_filter_chain)); p->fname = NULL; cfg->chain = p; break; case '=': /* initialise chain with this arg */ /** IG: Prepend a NULL provider to the beginning as above */ p = apr_pcalloc(cmd->pool, sizeof(mod_filter_chain)); p->fname = NULL; p->next = apr_pcalloc(cmd->pool, sizeof(mod_filter_chain)); p->next->fname = arg+1; cfg->chain = p; break; default: /* add to end */ p = apr_pcalloc(cmd->pool, sizeof(mod_filter_chain)); p->fname = arg; if (cfg->chain) { for (q = cfg->chain; q->next; q = q->next); q->next = p; } else { cfg->chain = p; } break; } return NULL; }
static oidc_provider_xml* amx_newOIDCProviderXml(pool* p){ oidc_provider_xml* ret; ret=apr_palloc(p,sizeof(oidc_provider_xml)); ret->metadataUrl=NULL; return ret; }
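Since apr_pcalloc() returns zero-filled memory, an equivalent constructor can skip the explicit NULL assignment, assuming every member of oidc_provider_xml should start out zero/NULL.

/* Sketch: zero-filled allocation makes the member initialization implicit. */
static oidc_provider_xml* amx_newOIDCProviderXml_zeroed(apr_pool_t *p)
{
    return apr_pcalloc(p, sizeof(oidc_provider_xml));
}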
/* * LDAP Parse URL : * Use the ldap url parsing routines to break up the LDAP URL into * host and port. * Is out of set_field because it is very big stuff */ static const char *vhx_ldap_parse_url(cmd_parms *cmd, void *dummy, const char *url) { int result; apr_ldap_url_desc_t *urld; apr_ldap_err_t *ldap_result; vhx_config_rec *vhr = (vhx_config_rec *) ap_get_module_config(cmd->server->module_config, &vhostx_module); VH_AP_LOG_ERROR(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server, "ldap url parse: `%s'", url); result = apr_ldap_url_parse(cmd->pool, url, &(urld), &(ldap_result)); if (result != APR_SUCCESS) { VH_AP_LOG_ERROR(APLOG_MARK, APLOG_DEBUG, 0, cmd->server, "ldap url not parsed : %s.", ldap_result->reason); return ldap_result->reason; } vhr->ldap_url = apr_pstrdup(cmd->pool, url); VH_AP_LOG_ERROR(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server, "ldap url parse: Host: %s", urld->lud_host); VH_AP_LOG_ERROR(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server, "ldap url parse: Port: %d", urld->lud_port); VH_AP_LOG_ERROR(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server, "ldap url parse: DN: %s", urld->lud_dn); VH_AP_LOG_ERROR(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server, "ldap url parse: attrib: %s", urld->lud_attrs? urld->lud_attrs[0] : "(null)"); VH_AP_LOG_ERROR(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server, "ldap url parse: scope: %s", (urld->lud_scope == LDAP_SCOPE_SUBTREE? "subtree" : urld->lud_scope == LDAP_SCOPE_BASE? "base" : urld->lud_scope == LDAP_SCOPE_ONELEVEL? "onelevel" : "unknown")); VH_AP_LOG_ERROR(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server, "ldap url parse: filter: %s", urld->lud_filter); /* Set all the values, or at least some sane defaults */ if (vhr->ldap_host) { char *p = apr_palloc(cmd->pool, strlen(vhr->ldap_host) + strlen(urld->lud_host) + 2); strcpy(p, urld->lud_host); strcat(p, " "); strcat(p, vhr->ldap_host); vhr->ldap_host = p; } else { vhr->ldap_host = urld->lud_host? apr_pstrdup(cmd->pool, urld->lud_host) : "localhost"; } vhr->ldap_basedn = urld->lud_dn ? apr_pstrdup(cmd->pool, urld->lud_dn) : NULL; vhr->ldap_scope = urld->lud_scope == LDAP_SCOPE_ONELEVEL ? LDAP_SCOPE_ONELEVEL : LDAP_SCOPE_SUBTREE; if (urld->lud_filter) { if (urld->lud_filter[0] == '(') { /* * Get rid of the surrounding parens; later on when generating the * filter, they'll be put back. */ vhr->ldap_filter = apr_pstrdup(cmd->pool, urld->lud_filter+1); vhr->ldap_filter[strlen(vhr->ldap_filter)-1] = '\0'; } else { vhr->ldap_filter = apr_pstrdup(cmd->pool, urld->lud_filter); } } else { vhr->ldap_filter = "objectClass=apacheConfig"; } /* * "ldaps" indicates secure ldap connections desired */ if (apr_strnatcasecmp(url, "ldaps") == 0) { vhr->ldap_secure = 1; vhr->ldap_port = urld->lud_port? urld->lud_port : LDAPS_PORT; VH_AP_LOG_ERROR(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server, "LDAP: using SSL connections"); } else { vhr->ldap_secure = 0; vhr->ldap_port = urld->lud_port? urld->lud_port : LDAP_PORT; VH_AP_LOG_ERROR(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, cmd->server, "LDAP: not using SSL connections"); } vhr->ldap_have_url = 1; return NULL; }
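A stand-alone sketch of the same apr_ldap_url_parse() call outside a config handler. The URL and the printing are illustrative, and the include assumes apr-util was built with LDAP support.

#include <stdio.h>
#include "apr_ldap.h"   /* apr-util LDAP support; declares apr_ldap_url_parse() */

/* Sketch: break an LDAP URL into host/port/base DN/filter and print them. */
static void show_ldap_url(apr_pool_t *pool, const char *url)
{
    apr_ldap_url_desc_t *urld;
    apr_ldap_err_t *err;

    if (apr_ldap_url_parse(pool, url, &urld, &err) != APR_SUCCESS) {
        printf("parse failed: %s\n", err->reason);
        return;
    }
    printf("host=%s port=%d basedn=%s filter=%s\n",
           urld->lud_host, urld->lud_port,
           urld->lud_dn ? urld->lud_dn : "(none)",
           urld->lud_filter ? urld->lud_filter : "(none)");
}

/* e.g. show_ldap_url(pool,
 *        "ldap://ldap.example.com/dc=example,dc=com?cn?sub?(objectClass=apacheConfig)"); */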
static int pgasp_handler (request_rec * r) { char cursor_string[256]; pgasp_config* config = (pgasp_config*) ap_get_module_config(r->server->module_config, &pgasp_module ) ; pgasp_dir_config* dir_config = (pgasp_dir_config*) ap_get_module_config(r->per_dir_config, &pgasp_module ) ; apr_table_t * GET = NULL, *GETargs = NULL; apr_array_header_t * POST; PGconn * pgc; PGresult * pgr; int i, j, allowed_to_serve, filename_length = 0; int field_count, tuple_count; char * requested_file; char *basename; params_t params; /* PQexecParams doesn't seem to like zero-length strings, so we feed it a dummy */ const char * dummy_get = "nothing"; const char * dummy_user = "******"; const char * cursor_values[2] = { r -> args ? apr_pstrdup(r->pool, r -> args) : dummy_get, r->user ? r->user : dummy_user }; int cursor_value_lengths[2] = { strlen(cursor_values[0]), strlen(cursor_values[1]) }; int cursor_value_formats[2] = { 0, 0 }; if (!r -> handler || strcmp (r -> handler, "pgasp-handler") ) return DECLINED; if (!r -> method || (strcmp (r -> method, "GET") && strcmp (r -> method, "POST")) ) return DECLINED; if (config->is_enabled != true) return OK; /* pretending we have responded, may return DECLINED in the future */ requested_file = apr_pstrdup (r -> pool, r -> path_info /*filename*/); i = strlen(requested_file) - 1; while (i > 0) { if (requested_file[i] == '.') filename_length = i; if (requested_file[i] == '/') break; i--; } if (i >= 0) { requested_file += i+1; /* now pointing to foo.pgasp instead of /var/www/.../foo.pgasp */ if (filename_length > i) filename_length -= i+1; } allowed_to_serve = false; for (i = 0; i < config->allowed_count; i++) { if (!strcmp(config->allowed[i], requested_file)) { allowed_to_serve = true; break; } } if (config->allowed_count == 0) allowed_to_serve = true; if (!allowed_to_serve) { ap_set_content_type(r, "text/plain"); ap_rprintf(r, "Hello there\nThis is PGASP\nEnabled: %s\n", config->is_enabled ? "On" : "Off"); ap_rprintf(r, "Requested: %s\n", requested_file); ap_rprintf(r, "Allowed: %s\n", allowed_to_serve ? "Yes" : "No"); return OK; /* pretending we have served the file, may return HTTP_FORBIDDEN in the future */ } if (filename_length == 0) { basename = requested_file; } else { basename = apr_pstrndup(r->pool, requested_file, filename_length); } ap_args_to_table(r, &GETargs); if (OK != ap_parse_form_data(r, NULL, &POST, -1, (~((apr_size_t)0)))) { __(r->server, " ** ap_parse_form_data is NOT OK"); } GET = (NULL == GET) ? GETargs : apr_table_overlay(r->pool, GETargs, GET); // move all POST parameters into GET table { ap_form_pair_t *pair; char *buffer; apr_off_t len; apr_size_t size; while (NULL != (pair = apr_array_pop(POST))) { apr_brigade_length(pair->value, 1, &len); size = (apr_size_t) len; buffer = apr_palloc(r->pool, size + 1); apr_brigade_flatten(pair->value, buffer, &size); buffer[len] = 0; apr_table_setn(GET, apr_pstrdup(r->pool, pair->name), buffer); //should name and value be ap_unescape_url() -ed? // __(r->server, "POST[%s]: %s", pair->name, buffer); } } params.r = r; params.args = NULL; apr_table_do(tab_args, &params, GET, NULL); params.args = apr_pstrcat(r->pool, "&", params.args, "&", NULL); cursor_values[0] = params.args; cursor_value_lengths[0] = strlen(cursor_values[0]); /* set response content type according to configuration or to default value */ ap_set_content_type(r, dir_config->content_type_set ?
dir_config->content_type : "text/html"); /* now connecting to Postgres, getting function output, and printing it */ pgc = pgasp_pool_open (r->server); if (PQstatus(pgc) != CONNECTION_OK) { spit_pg_error ("connect"); pgasp_pool_close(r->server, pgc); return OK; } /* removing extention (.pgasp or other) from file name, and adding "f_" for function name, i.e. foo.pgasp becomes psp_foo() */ snprintf(cursor_string, sizeof(cursor_string), "select * from f_%s($1::varchar)", basename); /* passing GET as first (and only) parameter */ if (0 == PQsendQueryParams (pgc, cursor_string, 1, NULL, cursor_values, cursor_value_lengths, cursor_value_formats, 0)) { spit_pg_error ("sending async query with params"); return clean_up_connection(r->server); } if (0 == PQsetSingleRowMode(pgc)) { ap_log_error(APLOG_MARK, APLOG_WARNING, 0, r->server, "can not fall into single raw mode to fetch data"); } while (NULL != (pgr = PQgetResult(pgc))) { if (PQresultStatus(pgr) != PGRES_TUPLES_OK && PQresultStatus(pgr) != PGRES_SINGLE_TUPLE) { spit_pg_error ("fetch data"); return clean_up_connection(r->server); } /* the following counts and for-loop may seem excessive as it's just 1 row/1 field, but might need it in the future */ field_count = PQnfields(pgr); tuple_count = PQntuples(pgr); for (i = 0; i < tuple_count; i++) { for (j = 0; j < field_count; j++) ap_rprintf(r, "%s", PQgetvalue(pgr, i, j)); ap_rprintf(r, "\n"); } PQclear (pgr); } pgasp_pool_close(r->server, pgc); return OK; }
static tee_buffer *create_buffer(apr_pool_t *p) { tee_buffer *b = apr_palloc(p, sizeof(tee_buffer)); b->first = b->last = NULL; return b; }
void InjectLibrary(pid_t pid) { const char *library(CY_LIBRARY); static const size_t Stack_(8 * 1024); size_t length(strlen(library) + 1), depth(sizeof(Baton) + length); depth = (depth + sizeof(uintptr_t) + 1) / sizeof(uintptr_t) * sizeof(uintptr_t); CYPool pool; uint8_t *local(reinterpret_cast<uint8_t *>(apr_palloc(pool, depth))); Baton *baton(reinterpret_cast<Baton *>(local)); baton->__pthread_set_self = &__pthread_set_self; baton->pthread_create = &pthread_create; baton->pthread_join = &pthread_join; baton->dlopen = &dlopen; baton->dlerror = &dlerror; baton->dlsym = &dlsym; baton->mach_thread_self = &mach_thread_self; baton->thread_terminate = &thread_terminate; baton->pid = getpid(); memcpy(baton->library, library, length); vm_size_t size(depth + Stack_); mach_port_t self(mach_task_self()), task; _krncall(task_for_pid(self, pid, &task)); vm_address_t stack; _krncall(vm_allocate(task, &stack, size, true)); vm_address_t data(stack + Stack_); vm_write(task, data, reinterpret_cast<vm_address_t>(baton), depth); thread_act_t thread; _krncall(thread_create(task, &thread)); thread_state_flavor_t flavor; mach_msg_type_number_t count; size_t push; Trampoline *trampoline; #if defined(__arm__) trampoline = &Trampoline_arm_; arm_thread_state_t state; flavor = ARM_THREAD_STATE; count = ARM_THREAD_STATE_COUNT; push = 0; #elif defined(__i386__) trampoline = &Trampoline_i386_; i386_thread_state_t state; flavor = i386_THREAD_STATE; count = i386_THREAD_STATE_COUNT; push = 5; #elif defined(__x86_64__) trampoline = &Trampoline_x86_64_; x86_thread_state64_t state; flavor = x86_THREAD_STATE64; count = x86_THREAD_STATE64_COUNT; push = 2; #else #error XXX: implement #endif vm_address_t code; _krncall(vm_allocate(task, &code, trampoline->size_, true)); vm_write(task, code, reinterpret_cast<vm_address_t>(trampoline->data_), trampoline->size_); _krncall(vm_protect(task, code, trampoline->size_, false, VM_PROT_READ | VM_PROT_EXECUTE)); /* printf("_ptss:%p\n", baton->__pthread_set_self); printf("dlsym:%p\n", baton->dlsym); printf("code:%zx\n", (size_t) code); */ uint32_t frame[push]; if (sizeof(frame) != 0) memset(frame, 0, sizeof(frame)); memset(&state, 0, sizeof(state)); mach_msg_type_number_t read(count); _krncall(thread_get_state(thread, flavor, reinterpret_cast<thread_state_t>(&state), &read)); _assert(count == count); #if defined(__arm__) state.r[0] = data; state.sp = stack + Stack_; state.pc = code + trampoline->entry_; if ((state.pc & 0x1) != 0) { state.pc &= ~0x1; state.cpsr |= 0x20; } #elif defined(__i386__) frame[1] = data; state.__eip = code + trampoline->entry_; state.__esp = stack + Stack_ - sizeof(frame); #elif defined(__x86_64__) frame[0] = 0xdeadbeef; state.__rdi = data; state.__rip = code + trampoline->entry_; state.__rsp = stack + Stack_ - sizeof(frame); #else #error XXX: implement #endif if (sizeof(frame) != 0) vm_write(task, stack + Stack_ - sizeof(frame), reinterpret_cast<vm_address_t>(frame), sizeof(frame)); _krncall(thread_set_state(thread, flavor, reinterpret_cast<thread_state_t>(&state), count)); _krncall(thread_resume(thread)); _krncall(mach_port_deallocate(self, task)); }
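_krncall() above aborts the injection on any non-KERN_SUCCESS result; its real definition lives elsewhere in this code base. Below is a minimal sketch of such a macro under that assumption; the name KRNCALL_SKETCH is deliberately different from the real one.

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch: evaluate a Mach call once, print a diagnostic and abort on failure. */
#define KRNCALL_SKETCH(expr) do { \
    kern_return_t _kr = (expr); \
    if (_kr != KERN_SUCCESS) { \
        fprintf(stderr, "%s: %s\n", #expr, mach_error_string(_kr)); \
        abort(); \
    } \
} while (0)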
static svn_error_t * file_rev_handler(void *baton, const char *path, svn_revnum_t revnum, apr_hash_t *rev_props, svn_boolean_t merged_revision, svn_txdelta_window_handler_t *content_delta_handler, void **content_delta_baton, apr_array_header_t *prop_diffs, apr_pool_t *pool) { struct file_rev_baton *frb = baton; svn_stream_t *last_stream; svn_stream_t *cur_stream; struct delta_baton *delta_baton; apr_pool_t *filepool; /* Clear the current pool. */ svn_pool_clear(frb->currpool); /* If this file has a non-textual mime-type, bail out. */ if (! frb->ignore_mime_type) SVN_ERR(check_mimetype(prop_diffs, frb->target, frb->currpool)); if (frb->ctx->notify_func2) { svn_wc_notify_t *notify = svn_wc_create_notify_url( svn_path_url_add_component2(frb->repos_root_url, path+1, pool), svn_wc_notify_blame_revision, pool); notify->path = path; notify->kind = svn_node_none; notify->content_state = notify->prop_state = svn_wc_notify_state_inapplicable; notify->lock_state = svn_wc_notify_lock_state_inapplicable; notify->revision = revnum; notify->rev_props = rev_props; frb->ctx->notify_func2(frb->ctx->notify_baton2, notify, pool); } if (frb->ctx->cancel_func) SVN_ERR(frb->ctx->cancel_func(frb->ctx->cancel_baton)); /* If there were no content changes, we couldn't care less about this revision now. Note that we checked the mime type above, so things work if the user just changes the mime type in a commit. Also note that we don't switch the pools in this case. This is important, since the tempfile will be removed by the pool and we need the tempfile from the last revision with content changes. */ if (!content_delta_handler) return SVN_NO_ERROR; frb->merged_revision = merged_revision; /* Create delta baton. */ delta_baton = apr_palloc(frb->currpool, sizeof(*delta_baton)); /* Prepare the text delta window handler. */ if (frb->last_filename) SVN_ERR(svn_stream_open_readonly(&delta_baton->source_stream, frb->last_filename, frb->currpool, pool)); else /* Means empty stream below. */ delta_baton->source_stream = NULL; last_stream = svn_stream_disown(delta_baton->source_stream, pool); if (frb->include_merged_revisions && !frb->merged_revision) filepool = frb->filepool; else filepool = frb->currpool; SVN_ERR(svn_stream_open_unique(&cur_stream, &delta_baton->filename, NULL, svn_io_file_del_on_pool_cleanup, filepool, filepool)); /* Get window handler for applying delta. */ svn_txdelta_apply(last_stream, cur_stream, NULL, NULL, frb->currpool, &delta_baton->wrapped_handler, &delta_baton->wrapped_baton); /* Wrap the window handler with our own. */ delta_baton->file_rev_baton = frb; *content_delta_handler = window_handler; *content_delta_baton = delta_baton; /* Create the rev structure. */ frb->rev = apr_pcalloc(frb->mainpool, sizeof(struct rev)); if (revnum < frb->start_rev) { /* We shouldn't get more than one revision before the starting revision (unless of including merged revisions). */ SVN_ERR_ASSERT((frb->last_filename == NULL) || frb->include_merged_revisions); /* The file existed before start_rev; generate no blame info for lines from this revision (or before). */ frb->rev->revision = SVN_INVALID_REVNUM; } else { SVN_ERR_ASSERT(revnum <= frb->end_rev); /* Set values from revision props. */ frb->rev->revision = revnum; frb->rev->rev_props = svn_prop_hash_dup(rev_props, frb->mainpool); } if (frb->include_merged_revisions) frb->rev->path = apr_pstrdup(frb->mainpool, path); return SVN_NO_ERROR; }
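The window_handler installed above is defined elsewhere in blame.c; in outline it must forward each delta window to the wrapped handler and finish the per-revision bookkeeping when the final NULL window arrives. The following is a sketch of that shape only, not the actual implementation.

/* Sketch of a wrapping svn_txdelta_window_handler_t. */
static svn_error_t *
window_handler_sketch(svn_txdelta_window_t *window, void *baton)
{
    struct delta_baton *dbaton = baton;

    /* Forward the window to the handler created by svn_txdelta_apply(). */
    SVN_ERR(dbaton->wrapped_handler(window, dbaton->wrapped_baton));

    if (window != NULL)
        return SVN_NO_ERROR;   /* more windows follow for this revision */

    /* NULL window: the full text for this revision is now in
     * dbaton->filename, so the per-revision blame work (diffing against
     * the previous file, flipping pools) would happen here. */
    return SVN_NO_ERROR;
}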