Example #1
std::string ns_compile_sql_where_clause(const std::set<unsigned long> & ids, const std::string & column_name) {
    std::string r;
    if (!ids.empty()) {
        //first term: " (column=value" -- use column_name here too, not a hard-coded "id"
        r += " (";
        r += column_name;
        r += "=";
        r += ns_to_string(*ids.begin());

        //remaining terms: " OR column=value"
        for (std::set<unsigned long>::const_iterator p = ++ids.begin(); p != ids.end(); ++p) {
            r += " OR ";
            r += column_name;
            r += "=";
            r += ns_to_string(*p);
        }
        r += ")";
    }
    return r;
}
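A minimal usage sketch (the table name and ids here are hypothetical; ns_to_string is the project's own number-to-string helper):

std::set<unsigned long> ids;
ids.insert(5);
ids.insert(9);
//where_clause is " (id=5 OR id=9)", ready to append after WHERE
std::string where_clause = ns_compile_sql_where_clause(ids, "id");
std::string query = "SELECT * FROM capture_samples WHERE" + where_clause;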
Example #2
File: do_reserve.c Project: A1ve5/slurm
/**
 * basil_reserve  -  wrapper around rsvn_new.
 * @user:       owner of the reservation
 * @batch_id:   (numeric) job ID
 * @width:      mppwidth (aprun -n)
 * @depth:      mppdepth (aprun -d)
 * @nppn:       mppnppn  (aprun -N)
 * @mem_mb:     mppmem   (aprun -m)
 * @nppcu:      processors per compute unit
 * @ns_head:    list of requested mppnodes (will be freed if not NULL)
 * @accel_head: optional accelerator parameters
 * Returns reservation ID > 0 if ok, negative %enum basil_error on error.
 */
long basil_reserve(const char *user, const char *batch_id,
		   uint32_t width, uint32_t depth, uint32_t nppn,
		   uint32_t mem_mb, uint32_t nppcu, struct nodespec *ns_head,
		   struct basil_accel_param *accel_head)
{
	struct basil_reservation *rsvn;
	struct basil_parse_data bp = {0};
	/* do not free mppnodes; it is stored/freed in the rsvn struct */
	char *mppnodes = ns_to_string(ns_head);
	long rc;

	free_nodespec(ns_head);
	rsvn = _rsvn_new(user, batch_id, width, depth, nppn, mem_mb,
			 nppcu, mppnodes, accel_head);
	if (rsvn == NULL)
		return -BE_INTERNAL;

	bp.method    = BM_reserve;
	bp.mdata.res = rsvn;
	bp.version   = BV_1_0;
	/*
	 * Rule:
	 * - if *res->batch_id is set, we are using Basil 1.1
	 * - if *res->batch_id == '\0' we have to fall back to Basil 1.0
	 */
	if (batch_id && *batch_id)
		bp.version = get_basil_version();

	rc = basil_request(&bp);
	if (rc >= 0)
		rc = rsvn->rsvn_id;
	free_rsvn(rsvn);
	return rc;
}
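A hypothetical call site (the user name, job id, and sizes below are illustrative, not taken from SLURM itself):

#include <stdio.h>

/* hypothetical caller: reserve width=8, depth=1 for one batch job;
 * ns_head may be NULL when no specific mppnodes are requested */
static long reserve_for_job(const char *user, const char *batch_id)
{
	long rsvn_id = basil_reserve(user, batch_id, 8, 1, 0, 0, 0, NULL, NULL);
	if (rsvn_id < 0)
		fprintf(stderr, "reservation failed: basil error %ld\n", -rsvn_id);
	return rsvn_id;
}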
Example #3
File: sysctl.c Project: nwmcsween/criu
static int __userns_sysctl_op(void *arg, int proc_fd, pid_t pid)
{
	int fd, ret = -1, dir, i, status, *fds = NULL;
	struct sysctl_userns_req *userns_req = arg;
	int op = userns_req->op;
	struct sysctl_req *req, **reqs = NULL;
	sigset_t blockmask, oldmask;
	pid_t worker;

	// fix up the pointer
	req = userns_req->reqs = (struct sysctl_req *) &userns_req[1];

	/* For files in the IPC/UTS namespaces, restoring is more complicated
	 * than for net. Unprivileged users cannot even open these files, so
	 * they must be opened by usernsd. However, the value in the kernel is
	 * changed for the IPC/UTS namespace that write()s to the open sysctl
	 * file (not who opened it). So, we must set the value from inside the
	 * usernsd caller's namespace. We:
	 *
	 * 1. usernsd opens the sysctl files
	 * 2. forks a task
	 * 3. setns()es to the UTS/IPC namespace of the caller
	 * 4. write()s to the files and exits
	 */
	dir = open("/proc/sys", O_RDONLY | O_DIRECTORY);
	if (dir < 0) {
		pr_perror("Can't open sysctl dir");
		return -1;
	}

	fds = xmalloc(sizeof(int) * userns_req->nr_req);
	if (!fds)
		goto out;

	reqs = xmalloc(sizeof(struct sysctl_req) * userns_req->nr_req);
	if (!reqs)
		goto out;

	memset(fds, -1, sizeof(int) * userns_req->nr_req);

	for (i = 0; i < userns_req->nr_req; i++)  {
		int arg_len = sysctl_userns_arg_size(req->type);
		int name_len = strlen((char *) &req[1]) + 1;
		int total_len = sizeof(*req) + arg_len + name_len;
		int flags;

		/* fix up the pointers */
		req->name = (char *) &req[1];
		req->arg = req->name + name_len;

		if (((char *) req) + total_len >= ((char *) userns_req) + MAX_UNSFD_MSG_SIZE) {
			pr_err("bad sysctl req %s, too big: %d\n", req->name, total_len);
			goto out;
		}

		if (op == CTL_READ)
			flags = O_RDONLY;
		else
			flags = O_WRONLY;

		fd = openat(dir, req->name, flags);
		if (fd < 0) {
			if (errno == ENOENT && (req->flags & CTL_FLAGS_OPTIONAL)) {
				/* skip the absent optional sysctl, but still
				 * advance to the next request */
				req = (struct sysctl_req *) (((char *) req) + total_len);
				continue;
			}
			pr_perror("Can't open sysctl %s", req->name);
			goto out;
		}

		/* save a pointer to the req, so we don't need to recompute its
		 * location
		 */
		reqs[i] = req;
		fds[i] = fd;

		req = (struct sysctl_req *) (((char *) req) + total_len);
	}

	/*
	 * Don't let the sigchld_handler() mess with us
	 * calling waitpid() on the exited worker. The
	 * same is done in cr_system().
	 */

	sigemptyset(&blockmask);
	sigaddset(&blockmask, SIGCHLD);
	sigprocmask(SIG_BLOCK, &blockmask, &oldmask);

	worker = fork();
	if (worker < 0)
		goto out;

	if (!worker) {
		int nsfd;
		const char *nsname = ns_to_string(userns_req->ns);

		BUG_ON(!nsname);
		nsfd = openat(proc_fd, nsname, O_RDONLY);
		if (nsfd < 0) {
			pr_perror("failed to open pid %d's ns %s", pid, nsname);
			exit(1);
		}

		if (setns(nsfd, 0) < 0) {
			pr_perror("failed to setns to %d's ns %s", pid, nsname);
			exit(1);
		}

		close(nsfd);

		for (i = 0; i < userns_req->nr_req; i++) {
			/* entries for absent optional sysctls were never opened */
			if (fds[i] < 0)
				continue;
			if (do_sysctl_op(fds[i], reqs[i], op) < 0)
				exit(1);
		}

		exit(0);
	}

	if (waitpid(worker, &status, 0) != worker) {
		pr_perror("worker didn't die?");
		kill(worker, SIGKILL);
		goto out;
	}
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	if (!WIFEXITED(status) || WEXITSTATUS(status)) {
		pr_err("worker failed: %d\n", status);
		goto out;
	}

	ret = 0;

out:
	if (fds) {
		for (i = 0; i < userns_req->nr_req; i++) {
			if (fds[i] < 0)
				continue;
			close_safe(&fds[i]);
		}

		xfree(fds);
	}

	if (reqs)
		xfree(reqs);

	close_safe(&dir);

	return ret;
}
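The core choreography above (block SIGCHLD, fork, setns() into the target namespace, do the work, reap the worker) is reusable on its own. A stripped-down sketch of the same pattern; run_in_ns and payload are illustrative names, not criu API, and this is Linux-only (setns() needs _GNU_SOURCE, which g++ defines by default):

#include <csignal>
#include <fcntl.h>
#include <sched.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

//block SIGCHLD so no other handler reaps the worker, fork, move the
//child into the target namespace with setns(), run the payload there,
//then reap the child ourselves and restore the old signal mask
static int run_in_ns(const char *ns_path, int (*payload)(void))
{
	sigset_t blockmask, oldmask;
	sigemptyset(&blockmask);
	sigaddset(&blockmask, SIGCHLD);
	sigprocmask(SIG_BLOCK, &blockmask, &oldmask);

	pid_t worker = fork();
	if (worker < 0) {
		sigprocmask(SIG_SETMASK, &oldmask, NULL);
		return -1;
	}

	if (worker == 0) {	/* child */
		int nsfd = open(ns_path, O_RDONLY);
		if (nsfd < 0 || setns(nsfd, 0) < 0)
			_exit(1);
		close(nsfd);
		_exit(payload() < 0 ? 1 : 0);
	}

	int status = 0;		/* parent */
	pid_t reaped = waitpid(worker, &status, 0);
	sigprocmask(SIG_SETMASK, &oldmask, NULL);
	if (reaped != worker) {
		kill(worker, SIGKILL);
		return -1;
	}
	return (WIFEXITED(status) && WEXITSTATUS(status) == 0) ? 0 : -1;
}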
Example #4
void ns_buffered_capture_scheduler::store_last_update_time_in_db(const ns_synchronized_time & time,ns_local_buffer_connection & sql) {
    image_server.set_cluster_constant_value("local_time_of_last_buffer_upload_to_central_db",ns_to_string(time.local_time),&sql);
    image_server.set_cluster_constant_value("central_time_of_last_buffer_upload_to_central_db",ns_to_string(time.remote_time),&sql);
}
Example #5
void ns_buffered_capture_scheduler::get_last_update_time(ns_local_buffer_connection & local_buffer_sql) {
    time_of_last_update_from_central_db.local_time = atol(image_server.get_cluster_constant_value("local_time_of_last_buffer_upload_to_central_db",ns_to_string(ns_default_update_time),&local_buffer_sql).c_str());
    time_of_last_update_from_central_db.remote_time = atol(image_server.get_cluster_constant_value("central_time_of_last_buffer_upload_to_central_db",ns_to_string(ns_default_update_time),&local_buffer_sql).c_str());
}
Example #6
void ns_buffered_capture_scheduler::update_local_buffer_from_central_server(ns_image_server_device_manager::ns_device_name_list & connected_devices,ns_local_buffer_connection & local_buffer, ns_sql & central_db) {

    if (connected_devices.size() == 0)
        return;

    ns_acquire_lock_for_scope lock(buffer_capture_scheduler_lock,__FILE__,__LINE__);

    local_buffer.clear_query();
    central_db.clear_query();

    std::string local_time = local_buffer.get_value("SELECT UNIX_TIMESTAMP(NOW())"),
                central_time = central_db.get_value("SELECT UNIX_TIMESTAMP(NOW())");

    const ns_synchronized_time update_start_time(atol(local_time.c_str())-10,atol(central_time.c_str())-10);
    //go ten seconds into the past to make sure all writes are committed

    //now we update the local buffer to the central node.
    commit_all_local_schedule_changes_to_central_db(update_start_time,local_buffer,central_db);
    //now that all the local buffer data is reflected in the central database, we check to see if there is any new data in the central database.
    //if so, we wipe the local buffer and update everything.

    capture_schedule.load_if_needed(&central_db);
    //get any new or updated capture schedule events

    central_db << "SELECT sched.id, samp.id, samp.experiment_id, UNIX_TIMESTAMP(sched.time_stamp),UNIX_TIMESTAMP(samp.time_stamp)";

    for (unsigned int i = 0; i < capture_schedule.table_format.column_names.size(); i++)
        central_db << ",`sched`.`" << capture_schedule.table_format.column_names[i] << "`";

    central_db << " FROM capture_schedule as sched, capture_samples as samp "
               << "WHERE (samp.device_name='" << connected_devices[0].name << "'";

    for (unsigned int i = 1; i < connected_devices.size(); i++)
        central_db << " OR samp.device_name='" << connected_devices[i].name << "'";

    central_db << ")"
               << " AND sched.time_at_start = 0 "
               << " AND sched.sample_id = samp.id "
               << " AND sched.time_at_finish = 0 "
               //here, we could bring the entire local database completely up to date
               //but only scans in the future will make any difference, so we only download
               //those who are still scheduled for the future
               //this old command would fully update the database, as time_of_last_update_from_central_db
               //would be set to 0
               //<< " AND sched.scheduled_time > " << (time_of_last_update_from_central_db.remote_time-image_server.maximum_allowed_local_scan_delay())  //only get events in the future
               //however, now we only grab the future, relevant scans.
               << " AND sched.scheduled_time > " << (update_start_time.remote_time-image_server.maximum_allowed_local_scan_delay())  //only get events in the future

               << " AND sched.time_stamp > FROM_UNIXTIME(" << time_of_last_update_from_central_db.remote_time <<") "
               << " AND sched.time_stamp <= FROM_UNIXTIME(" << update_start_time.remote_time << ") "
               << " ORDER BY sched.scheduled_time ASC";


    ns_sql_result new_schedule;
    central_db.get_rows(new_schedule);
    std::set<unsigned long> altered_experiment_ids;
    std::set<unsigned long> altered_sample_ids;
    for (unsigned int i = 0; i < new_schedule.size(); i++) {
        //	if (atol(new_schedule[i][4].c_str()) > central_time_of_last_update_from_central_db){
        altered_sample_ids.insert(atol(new_schedule[i][1].c_str()));
        altered_experiment_ids.insert(atol(new_schedule[i][2].c_str()));
        //	}
    }
    const unsigned long new_timestamp(update_start_time.local_time);

    if (new_schedule.size() != 0) {
        if (new_schedule.size() > 4)
            image_server.register_server_event(ns_image_server_event("ns_buffered_capture_scheduler::")
                                               << new_schedule.size() << " new capture schedule entries found.  Updating local buffer.",&central_db);

        //if samples or experiments have been changed or added, update them.
        //we need to do this *before* updating the capture schedule,
        //as the addition of a capture schedule item might trigger a scan immediately,
        //and that scan will fail if the sample and experiment information isn't already in the local database.
        if (altered_sample_ids.size() > 0) {
            capture_samples.load_if_needed("capture_samples",&central_db);
            experiments.load_if_needed("experiments",&central_db);
            std::string sample_where_clause(std::string(" WHERE ") + ns_compile_sql_where_clause(altered_sample_ids,"id")),
                experiment_where_clause(std::string(" WHERE ") + ns_compile_sql_where_clause(altered_experiment_ids,"id"));

            ns_sql_result capture_sample_data;
            ns_get_all_column_data_from_table("capture_samples",capture_samples.column_names,sample_where_clause,capture_sample_data,&central_db);
            ns_sql_result experiment_data;
            ns_get_all_column_data_from_table("experiments",experiments.column_names,experiment_where_clause,experiment_data,&central_db);

            std::cerr << "Updating local buffer with information about " << capture_sample_data.size() << " samples\n";
            //local_buffer_db.send_query("DELETE FROM buffered_capture_samples");
            if (capture_samples.time_stamp_column_id == -1)
                throw ns_ex("Could not find capture sample time stamp column!");
            long last_displayed_percent(-5);
            for(unsigned int i = 0; i < capture_sample_data.size(); i++) {
                const long percent((100*i)/capture_sample_data.size());
                if (percent >= last_displayed_percent+5) {
                    std::cerr << percent << "%...";
                    last_displayed_percent = percent;
                }
                std::string values;

                values += "`";
                values += capture_samples.column_names[0] + "`='" + local_buffer.escape_string(capture_sample_data[i][0]) + "'";
                for (unsigned int j = 1; j < capture_samples.column_names.size(); j++) {
                    //we set the local time stamp ourselves: a clock asynchrony between the
                    //central and local servers could otherwise leave remote timestamps in the
                    //future according to the local clock, which would trigger the local server
                    //to update the central one on the next check, ad infinitum.
                    if (j == capture_samples.time_stamp_column_id)
                        continue;
                    values += std::string(",`") +  capture_samples.column_names[j] + "`='" + local_buffer.escape_string(capture_sample_data[i][j]) + "'";
                }
                values += std::string(",`time_stamp`=FROM_UNIXTIME(") + ns_to_string(new_timestamp) + ")";
                local_buffer << "INSERT INTO buffered_capture_samples SET " << values
                             << " ON DUPLICATE KEY UPDATE " << values;
                local_buffer.send_query();
            }
            std::cerr << "Done.\n";
            //local_buffer.send_query("DELETE FROM buffered_experiments");
            for(unsigned int i = 0; i < experiment_data.size(); i++) {
                std::string values;
                values += "`";
                values += experiments.column_names[0] + "`='" + local_buffer.escape_string(experiment_data[i][0]) + "'";
                for (unsigned int j = 1; j < experiments.column_names.size(); j++) {
                    if (experiments.time_stamp_column_id == j)
                        continue;
                    values += std::string(",`") + experiments.column_names[j] + "`='" + local_buffer.escape_string(experiment_data[i][j]) + "'";
                }
                values += std::string(",time_stamp=FROM_UNIXTIME(") + ns_to_string(new_timestamp) + ")";

                local_buffer << "INSERT INTO buffered_experiments SET " << values;
                local_buffer << " ON DUPLICATE KEY UPDATE " << values;
                local_buffer.send_query();
            }
        }
        std::cerr << "Updating local buffer with information about " << new_schedule.size() << " schedule time points...\n";
        long last_displayed_percent = -5;
        for (unsigned int i = 0; i < new_schedule.size(); i++) {
            const long percent((100*i)/new_schedule.size());
            if (percent >= last_displayed_percent+5) {
                std::cerr << percent << "%...";
                last_displayed_percent = percent;
            }
            std::string all_values;
            all_values += "`";
            all_values += capture_schedule.table_format.column_names[0] + "`='" + local_buffer.escape_string(new_schedule[i][5]) + "'";
            for (unsigned int j = 1; j < capture_schedule.table_format.column_names.size(); j++) {
                if (j == capture_schedule.time_stamp_column)
                    continue;
                all_values += std::string( ", `") + capture_schedule.table_format.column_names[j] + "`='" + local_buffer.escape_string(new_schedule[i][5+j]) + "'";
            }
            all_values+=std::string(",time_stamp=FROM_UNIXTIME(") + ns_to_string(new_timestamp) + ")";


            std::string update_values;
            update_values += std::string("problem=") + new_schedule[i][5+capture_schedule.problem_column] + ","
                             + std::string("scheduled_time=") + new_schedule[i][5+capture_schedule.scheduled_time_column] + ","
                             + std::string("missed=") + new_schedule[i][5+capture_schedule.missed_column] + ","
                             + std::string("censored=") + new_schedule[i][5+capture_schedule.censored_column] +","
                             + std::string("transferred_to_long_term_storage=") + new_schedule[i][5+capture_schedule.transferred_to_long_term_storage_column] +","
                             + std::string("time_during_transfer_to_long_term_storage=") + new_schedule[i][5+capture_schedule.time_during_transfer_to_long_term_storage_column] +","
                             + std::string("time_during_deletion_from_local_storage=") + new_schedule[i][5+capture_schedule.time_during_deletion_from_local_storage_column] + ","
                             + std::string("time_stamp=FROM_UNIXTIME(") + ns_to_string(update_start_time.local_time) + ")";


            local_buffer << "INSERT INTO buffered_capture_schedule SET " << all_values
                         << " ON DUPLICATE KEY UPDATE " << update_values;
            local_buffer.send_query();
        }
        std::cerr << "Done.\n";
    }
    //if no changes to the schedule were made, look for changes made to any capture samples
    else {

        ns_sql_result capture_sample_data;
        ns_get_all_column_data_from_table("capture_samples",capture_samples.column_names,
                                          std::string("WHERE time_stamp >= FROM_UNIXTIME(") + ns_to_string(time_of_last_update_from_central_db.remote_time) +") "
                                          " AND time_stamp < FROM_UNIXTIME(" + ns_to_string(update_start_time.remote_time) +") "
                                          ,capture_sample_data,&central_db);
        if (capture_sample_data.size() > 0) {
            std::cerr << "Copying over " << capture_sample_data.size() << " samples\n";
            //local_buffer_db.send_query("DELETE FROM buffered_capture_samples");
            for(unsigned int i = 0; i < capture_sample_data.size(); i++) {
                std::string values;
                values += "`";
                values += capture_samples.column_names[0] + "`='" + local_buffer.escape_string(capture_sample_data[i][0]) + "'";
                for (unsigned int j = 1; j < capture_samples.column_names.size(); j++)
                    values += std::string(",`") +  capture_samples.column_names[j] + "`='" + local_buffer.escape_string(capture_sample_data[i][j]) + "'";

                local_buffer << "INSERT INTO buffered_capture_samples SET " << values
                             << " ON DUPLICATE KEY UPDATE " << values;
                local_buffer.send_query();
            }
        }
    }

    local_buffer.send_query("COMMIT");
    //lock.unlock();

    commit_all_local_non_schedule_changes_to_central_db(update_start_time,local_buffer,central_db);

    central_db << "SELECT k,v FROM constants WHERE time_stamp > FROM_UNIXTIME(" << time_of_last_update_from_central_db.remote_time << ")";
    ns_sql_result cres;
    central_db.get_rows(cres);
    if (cres.size() > 0) {
        std::cerr << "Updating " << cres.size() << " constants in local buffer\n";
    }
    for (unsigned int i = 0; i < cres.size(); i++)
        image_server.set_cluster_constant_value(local_buffer.escape_string(cres[i][0]),local_buffer.escape_string(cres[i][1]),&local_buffer,update_start_time.local_time);
    time_of_last_update_from_central_db = update_start_time;
    store_last_update_time_in_db(time_of_last_update_from_central_db,local_buffer);

    lock.release();
}
Example #7
void ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db(const ns_synchronized_time & update_start_time,ns_local_buffer_connection & local_buffer_sql, ns_sql & central_db) {
    if (time_of_last_update_from_central_db.local_time == ns_default_update_time)
        get_last_update_time(local_buffer_sql);
    buffered_capture_schedule.load_if_needed(&local_buffer_sql);

    ns_sql_result updated_data;

    const std::string altered_data_condition(
        std::string("time_stamp > FROM_UNIXTIME(") + ns_to_string(time_of_last_update_from_central_db.local_time) +
        ") AND time_stamp <= FROM_UNIXTIME(" + ns_to_string(update_start_time.local_time) + ") ");

    const unsigned long new_timestamp(time_of_last_update_from_central_db.remote_time);

    ns_get_all_column_data_from_table("buffered_capture_schedule",buffered_capture_schedule.table_format.column_names,
                                      std::string("WHERE ") + altered_data_condition + " AND uploaded_to_central_db != 3",
                                      updated_data,&local_buffer_sql);

    std::vector<ns_db_key_mapping> mappings(updated_data.size());
    if (updated_data.size() > 8)
        image_server.register_server_event(ns_image_server_event("ns_buffered_capture_scheduler::Committing ") << updated_data.size() << " recorded capture events to the central database.",&central_db);
    std::vector<ns_ex *> errors;
    for (unsigned long i = 0; i < updated_data.size(); i++) {
        try {
            unsigned long captured_image_id = atol(updated_data[i][buffered_capture_schedule.image_id_column].c_str());
            unsigned long problem_id = atol(updated_data[i][buffered_capture_schedule.problem_column].c_str());

            unsigned long central_captured_image_id(0),
                          central_problem_id(0);
            if (captured_image_id != 0 || problem_id != 0) {
                central_db << "SELECT captured_image_id,problem FROM capture_schedule WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                ns_sql_result res;
                central_db.get_rows(res);
                if (res.size() == 0)
                    throw ns_ex("Could not find capture schedule entry in central db for sample id " ) << updated_data[i][buffered_capture_schedule.id_column] << " finishing at time " << updated_data[i][buffered_capture_schedule.time_at_finish_column];
                central_captured_image_id = atol(res[0][0].c_str());
                central_problem_id = atol(res[0][1].c_str());
            }

            const bool need_to_make_new_capture_image(captured_image_id != 0 && central_captured_image_id != captured_image_id);
            //we need to make new entries in the central database for any new images or events
            if (need_to_make_new_capture_image) {
                mappings[i].captured_image.load_from_db(captured_image_id,&local_buffer_sql);
                mappings[i].old_captured_image = mappings[i].captured_image;
                mappings[i].old_image = mappings[i].image;
                if (mappings[i].captured_image.capture_images_image_id != 0)
                    mappings[i].image.load_from_db(mappings[i].captured_image.capture_images_image_id,&local_buffer_sql);


            }
            else {
                mappings[i].old_image = mappings[i].image;
                mappings[i].old_captured_image = mappings[i].captured_image;
                mappings[i].captured_image.captured_images_id = central_captured_image_id;
            }

            bool need_to_make_new_problem(problem_id != 0 && central_problem_id != problem_id);
            if (need_to_make_new_problem) {
                local_buffer_sql << "SELECT id,event,time,minor FROM buffered_host_event_log WHERE id = " << updated_data[i][buffered_capture_schedule.problem_column];
                ns_sql_result res;
                local_buffer_sql.get_rows(res);
                mappings[i].old_problem_id = mappings[i].problem_id;
                if (res.size() == 0) {
                    mappings[i].problem_id = image_server.register_server_event(ns_ex("Could not find problem id ") << updated_data[i][buffered_capture_schedule.problem_column] << " in local database buffer!",&central_db);
                    need_to_make_new_problem=false;
                }
                else {
                    mappings[i].problem_text = res[0][1];
                    mappings[i].problem_time = atol(res[0][2].c_str());
                    mappings[i].problem_minor = res[0][3] != "0";
                }
            }
            else {
                mappings[i].old_problem_id = mappings[i].problem_id;
                mappings[i].problem_id = central_problem_id;
            }

            if (need_to_make_new_capture_image && mappings[i].image.id != 0) {
                mappings[i].image.id = 0;
                mappings[i].image.save_to_db(0,&central_db,false);
                mappings[i].captured_image.capture_images_image_id = mappings[i].image.id;
            }
            if (need_to_make_new_capture_image) {
                mappings[i].captured_image.captured_images_id = 0;
                mappings[i].captured_image.save(&central_db);
            }
            if (need_to_make_new_problem) {
                mappings[i].old_problem_id = mappings[i].problem_id;
                ns_image_server_event ev;
                ev << mappings[i].problem_text;
                if (mappings[i].problem_minor) ev << ns_ts_minor_event;
                ev.set_time(mappings[i].problem_time);
                mappings[i].problem_id = image_server.register_server_event(ev,&central_db);
            }
        }
        catch(ns_ex & ex) {
            mappings[i].error << "Error while making mapping: " << ex.text();
            errors.push_back(&mappings[i].error);
        }
    }

    for (unsigned long i = 0; i < updated_data.size(); i++) {
        if (mappings[i].error.text().size() > 0)
            continue;
        try {
            central_db << "Update capture_schedule SET ";
            for (unsigned int j = 0; j < buffered_capture_schedule.table_format.column_names.size(); ++j) {
                if (j == buffered_capture_schedule.id_column
                        || j == buffered_capture_schedule.image_id_column ||
                        j == buffered_capture_schedule.problem_column || j ==
                        buffered_capture_schedule.timestamp_column) continue;
                central_db  << "`" << buffered_capture_schedule.table_format.column_names[j] << "`='" << central_db.escape_string(updated_data[i][j]) << "',";
            }
            central_db << "captured_image_id = " << mappings[i].captured_image.captured_images_id
                       << ", problem = " << mappings[i].problem_id;
            //we set the timestamp as old so that it does not trigger a re-download when the server tries to update its cache
            central_db << ", time_stamp=FROM_UNIXTIME("<< new_timestamp <<") ";
            central_db << " WHERE id = " << updated_data[i][0];
            central_db.send_query();
            if (mappings[i].old_captured_image.captured_images_id != mappings[i].captured_image.captured_images_id ||
                    mappings[i].old_problem_id != mappings[i].problem_id) {
                local_buffer_sql << "UPDATE buffered_capture_schedule SET captured_image_id = " << mappings[i].captured_image.captured_images_id
                                 << ", problem = " << mappings[i].problem_id << ", time_stamp = FROM_UNIXTIME("<< new_timestamp
                                 <<") WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_captured_images WHERE id = " << mappings[i].old_captured_image.captured_images_id;
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_images WHERE id = " << mappings[i].old_image.id;
                local_buffer_sql.send_query();
                local_buffer_sql << "DELETE FROM buffered_host_event_log WHERE id = " << mappings[i].old_problem_id;
                local_buffer_sql.send_query();
            }
            if (updated_data[i][buffered_capture_schedule.time_at_finish_column] != "0") {
                local_buffer_sql << "DELETE FROM buffered_capture_schedule WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
                local_buffer_sql.send_query();
                //	local_buffer_sql.clear_query();
            }
        }
        catch(ns_ex & ex) {
            mappings[i].error << "Error during central update: " << ex.text();
            errors.push_back(&mappings[i].error);
        }
    }
    for (unsigned int i = 0; i < mappings.size(); i++) {
        if (mappings[i].error.text().size() > 0) {

            local_buffer_sql << "UPDATE buffered_capture_schedule SET uploaded_to_central_db=3 WHERE id = " << updated_data[i][buffered_capture_schedule.id_column];
            local_buffer_sql.send_query();
            image_server.register_server_event(ns_image_server::ns_register_in_central_db_with_fallback,ns_ex("Could not update central db: ") << mappings[i].error.text());
        }

    }
    //update modified sample data.
    updated_data.resize(0);
    if (capture_samples.column_names.size() == 0) {
        capture_samples.load_column_names_from_db("capture_samples",&central_db);
        if (capture_samples.column_names.size() == 0)
            throw ns_ex("ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db()::Capture sample table appears to have no columns!");
    }
    if (capture_samples.column_names[0] != "id")
        throw ns_ex("ns_buffered_capture_scheduler::commit_all_local_schedule_changes_to_central_db()::Capture sample table does not have its id in the first column!");

    ns_get_all_column_data_from_table("buffered_capture_samples",capture_samples.column_names,
                                      std::string("WHERE ") + altered_data_condition,
                                      updated_data,&local_buffer_sql);
    if (capture_samples.time_stamp_column_id == -1)
        throw ns_ex("Could not find capture sample time stamp column!");
    for (unsigned int i = 0; i < updated_data.size(); i++) {
        central_db << "UPDATE capture_samples SET ";
        //skip the id column; we don't want to cause any unnecessary db shuffling by changing ids (even if we are changing the ids to the value they already are)
        central_db  << "`" << capture_samples.column_names[1] << "`='" << central_db.escape_string(updated_data[i][1]);
        for (unsigned int j = 2; j < capture_samples.column_names.size(); ++j) {
            if (j == capture_samples.time_stamp_column_id)
                continue;
            else central_db  << "',`" << capture_samples.column_names[j] << "`='" << central_db.escape_string(updated_data[i][j]);
        }
        central_db << "',time_stamp=FROM_UNIXTIME(" << new_timestamp << ") ";
        central_db << "WHERE id = " << updated_data[i][0];
        central_db.send_query();
    }

}
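The column loop above recurs throughout these examples. A self-contained sketch of the pattern (build_update and the column names below are illustrative, not the project's API), building an UPDATE that skips the id column and sets time_stamp explicitly:

#include <cstddef>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

//build an UPDATE from parallel column-name/value vectors, skipping the id
//column (column 0; primary keys are never rewritten) and the time_stamp
//column (set explicitly so the write does not re-trigger a sync pass)
std::string build_update(const std::string & table,
                         const std::vector<std::string> & columns,
                         const std::vector<std::string> & values, //assumed pre-escaped
                         std::size_t time_stamp_column,
                         unsigned long new_timestamp) {
    std::ostringstream q;
    q << "UPDATE " << table << " SET ";
    bool first = true;
    for (std::size_t j = 1; j < columns.size(); j++) {
        if (j == time_stamp_column)
            continue;
        q << (first ? "" : ",") << "`" << columns[j] << "`='" << values[j] << "'";
        first = false;
    }
    q << (first ? "" : ",") << "time_stamp=FROM_UNIXTIME(" << new_timestamp << ")"
      << " WHERE id=" << values[0];
    return q.str();
}

int main() {
    std::vector<std::string> cols{"id","name","device_name","time_stamp"};
    std::vector<std::string> vals{"7","sampleA","scanner1",""};
    //prints: UPDATE capture_samples SET `name`='sampleA',`device_name`='scanner1',time_stamp=FROM_UNIXTIME(1300000000) WHERE id=7
    std::cout << build_update("capture_samples", cols, vals, 3, 1300000000UL) << "\n";
}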