Beispiel #1
0
// Export the description of this media stream into a parameter list
void SDPMedia::putMedia(NamedList& msg, bool putPort)
{
    msg.addParam("media" + suffix(),"yes");
    msg.addParam("formats" + suffix(),formats());
    msg.addParam("transport" + suffix(),transport());
    if (mappings()) {
        msg.addParam("rtp_mapping" + suffix(),mappings());
    }
    if (isAudio()) {
        msg.addParam("rtp_rfc2833",rfc2833());
    }
    if (putPort) {
        msg.addParam("rtp_port" + suffix(),remotePort());
    }
    if (remoteCrypto()) {
        msg.addParam("crypto" + suffix(),remoteCrypto());
    }
    // encryption is copied out explicitly and then dropped from this list
    const char* encValue = getValue("encryption");
    if (encValue) {
        msg.addParam("encryption" + suffix(),encValue);
    }
    clearParam("encryption");
    // forward every remaining SDP attribute under a "sdp<suffix>_" prefix
    unsigned int idx = 0;
    const unsigned int total = length();
    while (idx < total) {
        const NamedString* attr = getParam(idx);
        if (attr) {
            msg.addParam("sdp" + suffix() + "_" + attr->name(),*attr);
        }
        ++idx;
    }
}
Beispiel #2
0
// Export the description of this media stream into a parameter list
void SDPMedia::putMedia(NamedList& msg, bool putPort)
{
    msg.addParam("media" + suffix(),"yes");
    msg.addParam("formats" + suffix(),formats());
    msg.addParam("transport" + suffix(),transport());
    if (mappings()) {
        msg.addParam("rtp_mapping" + suffix(),mappings());
    }
    if (isAudio()) {
        msg.addParam("rtp_rfc2833",rfc2833());
    }
    if (putPort) {
        msg.addParam("rtp_port" + suffix(),remotePort());
    }
    if (remoteCrypto()) {
        msg.addParam("crypto" + suffix(),remoteCrypto());
    }
    // encryption comes from the remote attribute list and is copied explicitly
    const char* encValue = m_rAttrs.getValue("encryption");
    if (encValue) {
        msg.addParam("encryption" + suffix(),encValue);
    }
    // remaining remote SDP attributes and format parameters, with prefixes
    putNamedList(msg, m_rAttrs, "sdp" + suffix() + "_");
    putNamedList(msg, m_fmtps, "fmtp_");
}
Beispiel #3
0
	// Build all spatial lookup structures from the points in
	// [points_begin, points_end).  The loop below requires every rank to be
	// a unique value in [0, count), i.e. ranks form a dense permutation; on
	// any violation (out-of-range or duplicate rank, or more than INT32_MAX
	// points) construction aborts early, leaving the object only partially
	// initialized -- NOTE(review): confirm callers tolerate that state.
	SearchContext(const Point* points_begin, const Point* points_end)
	{
		// NOTE(review): start_time is never read in this constructor --
		// presumably leftover timing instrumentation; confirm.
		auto start_time = std::chrono::system_clock::now();

		auto objects = points_end - points_begin;
		if (objects > std::numeric_limits<int32_t>::max())
			return;

		auto points_count = (int32_t)objects;
		coordinates.xs.resize(points_count);
		coordinates.ys.resize(points_count);
		ids.resize(points_count);
		ordered_points.resize(points_count);

		// Scatter each point into rank order; ranks[] doubles as a
		// seen-marker (END_OF_DATA == not yet seen) to reject duplicates.
		auto ranks = Ranks(points_count, END_OF_DATA);
		auto points = std::vector<Point>(points_count);
		for (auto it = points_begin; it != points_end; ++it)
		{
			auto idx = it->rank;
			if (idx < 0 || idx >= points_count || ranks[idx] != END_OF_DATA)
				return;
			coordinates.xs[idx] = it->x;
			coordinates.ys[idx] = it->y;
			ids[idx] = it->id;
			ranks[idx] = idx;
			points[idx] = *it;
			ordered_points[idx] = *it;
		}
		Mappings mappings(points_count, coordinates, ids);

		// Bidirectional mapping between x-sorted position and rank index.
		concurrency::parallel_sort(points.begin(), points.end(), [](const Point& a, const Point& b) { return a.x < b.x; });
		for (auto i = 0; i < points_count; ++i)
		{
			mappings.x_sorted_rank_idxes[i] = points[i].rank;
			mappings.rank_idx_to_x_sorted_idxes[points[i].rank] = i;
		}

		// Bidirectional mapping between y-sorted position and rank index.
		concurrency::parallel_sort(points.begin(), points.end(), [](const Point& a, const Point& b) { return a.y < b.y; });
		for (auto i = 0; i < points_count; ++i)
		{
			mappings.y_sorted_rank_idxes[i] = points[i].rank;
			mappings.rank_idx_to_y_sorted_idxes[points[i].rank] = i;
		}

		// Build the search structure rooted at `top` from the mappings.
		top = surface_factory(mappings, ranks, 0, false, false);
	}
Beispiel #4
0
// Look up, in the X11 compose.dir index file, the Compose file name that is
// registered for the given locale.  Returns an empty QString when the locale
// is empty, the index cannot be opened, or no entry matches.
QString TableGenerator::readLocaleMappings(const QByteArray &locale)
{
    QString file;
    if (locale.isEmpty())
        return file;

    QFile mappings(systemComposeDir() + QLatin1String("/compose.dir"));
    if (mappings.open(QIODevice::ReadOnly)) {
        const int localeNameLength = locale.size();
        const char * const localeData = locale.constData();

        // NOTE(review): lines longer than 1024 bytes would be split across
        // reads and parsed as two lines -- presumably never happens for
        // compose.dir; confirm.
        char l[1024];
        // formatting of compose.dir has some inconsistencies
        while (!mappings.atEnd()) {
            int read = mappings.readLine(l, sizeof(l));
            if (read <= 0)
                break;

            char *line = l;
            // data lines start with a lowercase file name; anything else
            // (comments, blank lines) is skipped
            if (*line >= 'a' && *line <= 'z') {
                // file name: runs up to ':', space or tab
                while (*line && *line != ':' && *line != ' ' && *line != '\t')
                    ++line;
                if (!*line)
                    continue;
                const char * const composeFileNameEnd = line;
                *line = '\0';   // terminate the file name in place
                ++line;

                // skip whitespace separating file name and locale name
                while (*line && (*line == ' ' || *line == '\t'))
                    ++line;
                const char * const lc = line;
                // locale name: runs up to whitespace or end of line
                while (*line && *line != ' ' && *line != '\t' && *line != '\n')
                    ++line;
                *line = '\0';
                // case-insensitive, full-length match against requested locale
                if (localeNameLength == (line - lc) && !strncasecmp(lc, localeData, line - lc)) {
                    file = QString::fromLocal8Bit(l, composeFileNameEnd - l);
                    break;
                }
            }
        }
        mappings.close();
    }
    return file;
}
Beispiel #5
0
#include <fcntl.h>
#include "gdbm.h"

#if defined(WIN32) && !defined(__CYGWIN__)
#include "gdbmerrno.h"
extern const char * gdbm_strerror(gdbm_error);
#endif

PyDoc_STRVAR(gdbmmodule__doc__,
"This module provides an interface to the GNU DBM (GDBM) library.\n\
\n\
This module is quite similar to the dbm module, but uses GDBM instead to\n\
provide some additional functionality. Please note that the file formats\n\
created by GDBM and dbm are incompatible. \n\
\n\
GDBM objects behave like mappings (dictionaries), except that keys and\n\
values are always strings. Printing a GDBM object doesn't print the\n\
keys and values, and the items() and values() methods are not\n\
supported.");

/* Instance object: a Python wrapper around one open GDBM database. */
typedef struct {
    PyObject_HEAD
    int di_size;	/* -1 means recompute */
    GDBM_FILE di_dbm;	/* underlying GDBM handle; NULL once closed */
} dbmobject;

static PyTypeObject Dbmtype;

/* True when v is an instance of Dbmtype (exact type match only). */
#define is_dbmobject(v) ((v)->ob_type == &Dbmtype)
#define check_dbmobject_open(v) if ((v)->di_dbm == NULL) \
    { PyErr_SetString(DbmError, "GDBM object has already been closed"); \
Beispiel #6
0
// Push all locally buffered capture-schedule and capture-sample changes up to
// the central database.  Rows modified in the local buffer since the last
// sync (and no later than update_start_time) are copied centrally; captured
// images and problem events that exist only in the local buffer are first
// re-created in the central db so that foreign keys reference central ids.
// Rows that fail to commit are marked uploaded_to_central_db=3 so they are
// skipped on subsequent runs.
void ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db(const ns_synchronized_time & update_start_time,ns_local_buffer_connection & local_buffer_sql, ns_sql & central_db) {
    // lazily initialize the last-sync timestamp and the cached table format
    if (time_of_last_update_from_central_db.local_time == ns_default_update_time)
        get_last_update_time(local_buffer_sql);
    buffered_capture_schedule.load_if_needed(&local_buffer_sql);

    ns_sql_result updated_data;

    // rows changed after the last sync but not after the start of this update
    const std::string altered_data_condition(
        std::string("time_stamp > FROM_UNIXTIME(") + ns_to_string(time_of_last_update_from_central_db.local_time) +
        ") AND time_stamp <= FROM_UNIXTIME(" + ns_to_string(update_start_time.local_time) + ") ");

    // timestamp written back on commit; the old remote time is used so the
    // write does not look like a fresh central change (see comment below)
    const unsigned long new_timestamp(time_of_last_update_from_central_db.remote_time);

    // uploaded_to_central_db == 3 marks rows whose commit previously failed
    ns_get_all_column_data_from_table("buffered_capture_schedule",buffered_capture_schedule.table_format.column_names,
                                      std::string("WHERE ") + altered_data_condition + " AND uploaded_to_central_db != 3",
                                      updated_data,&local_buffer_sql);

    // Phase 1: for each altered schedule row, resolve the central-db ids of
    // its captured image and problem event, creating new central records when
    // the central db has never seen the local entries.
    std::vector<ns_db_key_mapping> mappings(updated_data.size());
    if (updated_data.size() > 8)
        image_server.register_server_event(ns_image_server_event("ns_buffered_capture_scheduler::Committing ") << updated_data.size() << " recorded capture events to the central database.",&central_db);
    // NOTE(review): errors collects pointers into mappings but is never read
    // in this function -- confirm whether it can be removed.
    std::vector<ns_ex *> errors;
    for (unsigned long i = 0; i < updated_data.size(); i++) {
        try {
            unsigned long captured_image_id = atol(updated_data[i][buffered_capture_schedule.image_id_column].c_str());
            unsigned long problem_id = atol(updated_data[i][buffered_capture_schedule.problem_column].c_str());

            // fetch the ids the central db currently has for this row, to
            // detect whether the local ids refer to records it lacks
            unsigned long central_captured_image_id(0),
                     central_problem_id(0);
            if (captured_image_id != 0 || problem_id != 0) {
                central_db << "SELECT captured_image_id,problem FROM capture_schedule WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                ns_sql_result res;
                central_db.get_rows(res);
                if (res.size() == 0)
                    throw ns_ex("Could not find capture schedule entry in central db for sample id " ) << updated_data[i][buffered_capture_schedule.id_column] << " finishing at time " << updated_data[i][buffered_capture_schedule.time_at_finish_column];
                central_captured_image_id = atol(res[0][0].c_str());
                central_problem_id = atol(res[0][1].c_str());
            }

            const bool need_to_make_new_capture_image(captured_image_id != 0 && central_captured_image_id != captured_image_id);
            //we need to make new entries in the central database for any new images or events
            if (need_to_make_new_capture_image) {
                mappings[i].captured_image.load_from_db(captured_image_id,&local_buffer_sql);
                mappings[i].old_captured_image = mappings[i].captured_image;
                mappings[i].old_image = mappings[i].image;
                if (mappings[i].captured_image.capture_images_image_id != 0)
                    mappings[i].image.load_from_db(mappings[i].captured_image.capture_images_image_id,&local_buffer_sql);


            }
            else {
                // central db already knows this image; just adopt its id
                mappings[i].old_image = mappings[i].image;
                mappings[i].old_captured_image = mappings[i].captured_image;
                mappings[i].captured_image.captured_images_id = central_captured_image_id;
            }

            // non-const: demoted to false below if the local event is missing
            bool need_to_make_new_problem(problem_id != 0 && central_problem_id != problem_id);
            if (need_to_make_new_problem) {
                local_buffer_sql << "SELECT id,event,time,minor FROM buffered_host_event_log WHERE id = " << updated_data[i][buffered_capture_schedule.problem_column];
                ns_sql_result res;
                local_buffer_sql.get_rows(res);
                mappings[i].old_problem_id = mappings[i].problem_id;
                if (res.size() == 0) {
                    // local buffer lost the event; log that fact centrally and
                    // use the id of the logged event instead
                    mappings[i].problem_id = image_server.register_server_event(ns_ex("Could not find problem id ") << updated_data[i][buffered_capture_schedule.problem_column] << " in local database buffer!",&central_db);
                    need_to_make_new_problem=false;
                }
                else {
                    mappings[i].problem_text = res[0][1];
                    mappings[i].problem_time = atol(res[0][2].c_str());
                    mappings[i].problem_minor = res[0][3] != "0";
                }
            }
            else {
                mappings[i].old_problem_id = mappings[i].problem_id;
                mappings[i].problem_id = central_problem_id;
            }

            // create the central image record first so the captured image can
            // reference its new id
            if (need_to_make_new_capture_image && mappings[i].image.id != 0) {
                mappings[i].image.id = 0;
                mappings[i].image.save_to_db(0,&central_db,false);
                mappings[i].captured_image.capture_images_image_id = mappings[i].image.id;
            }
            if (need_to_make_new_capture_image) {
                mappings[i].captured_image.captured_images_id = 0;
                mappings[i].captured_image.save(&central_db);
            }
            if (need_to_make_new_problem) {
                // NOTE(review): old_problem_id was already saved in the branch
                // above; this overwrite looks redundant -- confirm intent.
                mappings[i].old_problem_id = mappings[i].problem_id;
                ns_image_server_event ev;
                ev << mappings[i].problem_text;
                if (mappings[i].problem_minor) ev << ns_ts_minor_event;
                ev.set_time(mappings[i].problem_time);
                mappings[i].problem_id = image_server.register_server_event(ev,&central_db);
            }
        }
        catch(ns_ex & ex) {
            mappings[i].error << "Error while making mapping: " << ex.text();
            errors.push_back(&mappings[i].error);
        }
    }

    // Phase 2: write each updated schedule row to the central db using the
    // ids resolved above, then clean up local buffer rows that are now stale.
    for (unsigned long i = 0; i < updated_data.size(); i++) {
        if (mappings[i].error.text().size() > 0)
            continue;
        try {
            central_db << "Update capture_schedule SET ";
            // copy every column except id, the two remapped ids and timestamp
            for (unsigned int j = 0; j < buffered_capture_schedule.table_format.column_names.size(); ++j) {
                if (j == buffered_capture_schedule.id_column
                        || j == buffered_capture_schedule.image_id_column ||
                        j == buffered_capture_schedule.problem_column || j ==
                        buffered_capture_schedule.timestamp_column) continue;
                central_db  << "`" << buffered_capture_schedule.table_format.column_names[j] << "`='" << central_db.escape_string(updated_data[i][j]) << "',";
            }
            central_db << "captured_image_id = " << mappings[i].captured_image.captured_images_id
                       << ", problem = " << mappings[i].problem_id;
            //we set the timestamp as old so that it does not trigger a re-download when the server tries to update its cache
            central_db << ", time_stamp=FROM_UNIXTIME("<< new_timestamp <<") ";
            central_db << " WHERE id = " << updated_data[i][0];
            central_db.send_query();
            // if the ids changed, point the local row at the central ids and
            // delete the superseded local image/event records
            if (mappings[i].old_captured_image.captured_images_id != mappings[i].captured_image.captured_images_id ||
                    mappings[i].old_problem_id != mappings[i].problem_id) {
                local_buffer_sql << "UPDATE buffered_capture_schedule SET captured_image_id = " << mappings[i].captured_image.captured_images_id
                                 << ", problem = " << mappings[i].problem_id << ", time_stamp = FROM_UNIXTIME("<< new_timestamp
                                 <<") WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_captured_images WHERE id = " << mappings[i].old_captured_image.captured_images_id;
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_images WHERE id = " << mappings[i].old_image.id;
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_host_event_log WHERE id = " << mappings[i].old_problem_id;
                local_buffer_sql.send_query();
            }
            // a finished capture no longer needs a local schedule entry
            if (updated_data[i][buffered_capture_schedule.time_at_finish_column] != "0") {
                local_buffer_sql << "DELETE FROM buffered_capture_schedule WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                local_buffer_sql.send_query();
                //	local_buffer_sql.clear_query();
            }
        }
        catch(ns_ex & ex) {
            mappings[i].error << "Error during central update: " << ex.text();
            errors.push_back(&mappings[i].error);
        }
    }
    // mark failed rows so they are excluded from future commits, and report
    for (unsigned int i = 0; i < mappings.size(); i++) {
        if (mappings[i].error.text().size() > 0) {

            local_buffer_sql << "UPDATE buffered_capture_schedule SET uploaded_to_central_db=3 WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
            local_buffer_sql.send_query();
            image_server.register_server_event(ns_image_server::ns_register_in_central_db_with_fallback,ns_ex("Could not update central db: ") << mappings[i].error.text());
        }

    }
    //update modified sample data.
    updated_data.resize(0);
    if (capture_samples.column_names.size() == 0) {
        capture_samples.load_column_names_from_db("capture_samples",&central_db);
        if (capture_samples.column_names.size() == 0)
            throw ns_ex("ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db()::Capture sample table appears to have no columns!");
    }
    // the code below assumes column 0 is the id; verify before building SQL
    if (capture_samples.column_names[0] != "id")
        throw ns_ex("ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db()::Capture sample table does not have its id in the first column!");

    ns_get_all_column_data_from_table("buffered_capture_samples",capture_samples.column_names,
                                      std::string("WHERE ") + altered_data_condition,
                                      updated_data,&local_buffer_sql);
    if (capture_samples.time_stamp_column_id == -1)
        throw ns_ex("Could not find capture sample time stamp column!");
    for (unsigned int i = 0; i < updated_data.size(); i++) {
        central_db << "UPDATE capture_samples SET ";
        //skip id column; we don't want to cause any unneccisary db shuffling by changing ids (even if we are changing the ids to the value they already are)
        // note the quote chaining: each fragment below closes the previous
        // value's quote and opens the next; the final quote is closed at the
        // time_stamp fragment
        central_db  << capture_samples.column_names[1] << "='" << central_db.escape_string(updated_data[i][1]);
        for (unsigned int j = 2; j < capture_samples.column_names.size(); ++j) {
            if (j == capture_samples.time_stamp_column_id)
                continue;
            else central_db  << "',`" << capture_samples.column_names[j] << "`='" << central_db.escape_string(updated_data[i][j]);
        }
        central_db << "',time_stamp=FROM_UNIXTIME(" << new_timestamp << ") ";
        central_db << "WHERE id = " << updated_data[i][0];
        central_db.send_query();
    }

}