Example #1
File: csl.c Project: Tayyib/uludag
static PyObject *
c_call(PyObject *self, PyObject *args)
{
	struct ipc_struct ipc;
	struct pack *p;
	char *node;
	char *pak;
	int nd;

	if (!PyArg_ParseTuple(args, "ss", &node, &pak))
		return NULL;

	nd = model_lookup_method(node);
	if (nd == -1) return NULL;

	memset(&ipc, 0, sizeof(struct ipc_struct));
	ipc.node = nd;
	p = pack_new(128);
	pack_put(p, pak, strlen(pak));

	job_start(CMD_CALL_PACKAGE, &ipc, p);

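	/* pump replies from the job: forward notifications to the parent and stop once CMD_FINISH arrives */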
	while (1) {
		struct ProcChild *sender;
		int cmd;
		int size;

		if (1 == proc_listen(&sender, &cmd, &size, 1)) {
			switch (cmd) {
				case CMD_RESULT:
				case CMD_FAIL:
				case CMD_ERROR:
				case CMD_NONE:
					proc_get(sender, &ipc, p, size);
					break;
				case CMD_NOTIFY:
					proc_get(sender, &ipc, p, size);
					proc_put(TO_PARENT, cmd, &ipc, p);
					break;
			}
			if (cmd == CMD_FINISH) break;
		}
	}
	Py_INCREF(Py_None);
	return Py_None;
}
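The function above is a CPython extension method: it parses two string arguments, dispatches a CMD_CALL_PACKAGE job over the internal IPC, forwards notifications to the parent until CMD_FINISH arrives, and returns None. A minimal sketch of how such a method would typically be exposed to the interpreter follows; the Python-level name "call", the module name "csl" and the docstring are assumptions, only the PyMethodDef/Py_InitModule machinery is standard (Python 2 style) CPython API.

static PyMethodDef csl_methods[] = {
	/* "call" is a hypothetical Python-level name for c_call */
	{"call", c_call, METH_VARARGS, "call(node, packed_args) -> None"},
	{NULL, NULL, 0, NULL}
};

PyMODINIT_FUNC
initcsl(void)
{
	/* module name "csl" is assumed from the file name */
	Py_InitModule("csl", csl_methods);
}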
Example #2
int task_wait(struct task **tasks, struct task *t, int async)
{
	struct proc *p;
	struct job *j;

	for(j = t->jobs; j; j = j->next)
		while(!job_complete(j)){
			/* TODO: term_give_to() */
			job_wait(j, async);

			if(async && j->state == JOB_RUNNING)
				/* no change */
				return 0;
			else if(j->state == JOB_COMPLETE){
				/* changed to complete, next job in the list */
				if(j->next){
					job_start(j->next);
					if(async)
						return 0;
					continue;
				}else{
					task_rm(tasks, t);
					return 1;
				}
			}

			/*
			 * check for a stopped proc; if we find one it's been
			 * ^Z'd, so stop the other procs and return
			 */
			for(p = j->proc; p; p = p->next)
				if(p->state == PROC_STOP){
					job_sig(j, SIGSTOP);
					return 0;
					/* don't attempt to move onto any other jobs */
				}
		}

	return 0;
}
Example #3
void qmp_blockdev_create(const char *job_id, BlockdevCreateOptions *options,
                         Error **errp)
{
    BlockdevCreateJob *s;
    const char *fmt = BlockdevDriver_str(options->driver);
    BlockDriver *drv = bdrv_find_format(fmt);

    /* If the driver is in the schema, we know that it exists. But it may not
     * be whitelisted. */
    assert(drv);
    if (bdrv_uses_whitelist() && !bdrv_is_whitelisted(drv, false)) {
        error_setg(errp, "Driver is not whitelisted");
        return;
    }

    /* Error out if the driver doesn't support .bdrv_co_create */
    if (!drv->bdrv_co_create) {
        error_setg(errp, "Driver does not support blockdev-create");
        return;
    }

    /* Create the block job */
    /* TODO Running in the main context. Block drivers need to error out or add
     * locking when they use a BDS in a different AioContext. */
    s = job_create(job_id, &blockdev_create_job_driver, NULL,
                   qemu_get_aio_context(), JOB_DEFAULT | JOB_MANUAL_DISMISS,
                   NULL, NULL, errp);
    if (!s) {
        return;
    }

    s->drv = drv;
    s->opts = QAPI_CLONE(BlockdevCreateOptions, options);

    job_start(&s->common);
}
Example #4
/**
 * gstreamill_job_start:
 * @gstreamill: (in): the Gstreamill instance.
 * @job_desc: (in): JSON job description.
 *
 * Returns: JSON-formatted job execution result.
 */
gchar * gstreamill_job_start (Gstreamill *gstreamill, gchar *job_desc)
{
        gchar *p, *name;
        Job *job;

        if (!jobdesc_is_valid (job_desc)) {
                p = g_strdup_printf ("{\n    \"result\": \"failure\",\n    \"reason\": \"invalid job\"\n}");
                return p;
        }

        if (jobdesc_is_live (job_desc)) {
                GST_ERROR ("live job arrived:\n%s", job_desc);

        } else {
                GST_ERROR ("transcode job arrived:\n%s", job_desc);
        }

        /* create job object */
        name = jobdesc_get_name (job_desc);
        if (get_job (gstreamill, name) != NULL) {
                GST_ERROR ("start job failure, duplicated name %s.", name);
                p = g_strdup_printf ("{\n    \"result\": \"failure\",\n    \"reason\": \"duplicated name\"\n}");
                g_free (name);
                return p;
        }
        job = job_new ("job", job_desc, "name", name, "exe_path", gstreamill->exe_path, NULL);
        g_free (name);

        /* job initialize */
        job->log_dir = gstreamill->log_dir;
        g_mutex_init (&(job->access_mutex));
        job->is_live = jobdesc_is_live (job_desc);
        job->eos = FALSE;
        job->current_access = 0;
        job->age = 0;
        job->last_start_time = NULL;
        if (job_initialize (job, gstreamill->daemon) != 0) {
                p = g_strdup_printf ("{\n    \"result\": \"failure\",\n    \"reason\": \"initialize job failure\"\n}");
                g_object_unref (job);
                return p;
        }

        if (job->is_live && (job_output_initialize (job) != 0)) {
                p = g_strdup_printf ("{\n    \"result\": \"failure\",\n    \"reason\": \"initialize job output failure\"\n}");
                g_object_unref (job);
                return p;
        }

        /* reset and start job */
        job_reset (job);
        if (gstreamill->daemon) {
                guint64 stat;

                stat = create_job_process (job);
                if (stat == JOB_STATE_PLAYING) {
                        GST_ERROR ("Start job %s success", job->name);
                        g_mutex_lock (&(gstreamill->job_list_mutex));
                        gstreamill->job_list = g_slist_append (gstreamill->job_list, job);
                        g_mutex_unlock (&(gstreamill->job_list_mutex));
                        p = g_strdup_printf ("{\n    \"name\": \"%s\",\n    \"result\": \"success\"\n}", job->name);

                } else {
                        GST_ERROR ("Start job %s failure, return stat: %s", job->name, job_state_get_name (stat));
                        g_object_unref (job);
                        p = g_strdup_printf ("{\n    \"result\": \"failure\",\n    \"reason\": \"create process failure\"\n}");
                }

        } else {
                job_encoders_output_initialize (job);
                if (job_start (job) == 0) {
                        g_mutex_lock (&(gstreamill->job_list_mutex));
                        gstreamill->job_list = g_slist_append (gstreamill->job_list, job);
                        g_mutex_unlock (&(gstreamill->job_list_mutex));
                        p = g_strdup ("{\n    \"result\": \"success\"\n}");

                } else {
                        p = g_strdup_printf ("{\n    \"result\": \"failure\",\n    \"reason\": \"unknown\"\n}");
                }
        }

        return p;
}
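This variant reports the outcome as a JSON string, so the caller is expected to parse it; Example #7 below does exactly that. A condensed sketch of that consumption, assuming a parson-style JSON API (as used in Example #7) and a job description already loaded into job_desc:

gchar *result = gstreamill_job_start (gstreamill, job_desc);
JSON_Value *val = json_parse_string (result);
JSON_Object *obj = json_value_get_object (val);
/* "result" is "success" or "failure"; failures also carry a "reason" field */
if (g_strcmp0 (json_object_get_string (obj, "result"), "success") != 0) {
        GST_ERROR ("start job failed: %s", json_object_get_string (obj, "reason"));
}
json_value_free (val);
g_free (result);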
Example #5
int task_start(struct task *t)
{
	t->state = TASK_RUNNING;
	return job_start(t->jobs);
}
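task_start() only launches the first job in the task's list; Example #2's task_wait() is what advances through the remaining jobs and removes the task once everything has completed. A minimal sketch of how the two might be combined in a caller; the task-list handling and the wrapper function itself are assumptions, only task_start()/task_wait() come from the examples above.

/* hypothetical caller: run a task in the foreground or background */
static int run_task(struct task **tasks, struct task *t, int background)
{
	task_start(t);               /* launches t->jobs, marks TASK_RUNNING */

	/* async (background) == 1: poll once; 0 means the task is still
	 * running and stays on the list.
	 * async == 0: block; 1 means every job completed and the task was
	 * removed, 0 means a proc was ^Z'd and the job received SIGSTOP. */
	return task_wait(tasks, t, background);
}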
Example #6
void stream_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, const char *backing_file_str,
                  int creation_flags, int64_t speed,
                  BlockdevOnError on_error, Error **errp)
{
    StreamBlockJob *s;
    BlockDriverState *iter;
    bool bs_read_only;

    if (bdrv_freeze_backing_chain(bs, base, errp) < 0) {
        return;
    }

    /* Make sure that the image is opened in read-write mode */
    bs_read_only = bdrv_is_read_only(bs);
    if (bs_read_only) {
        if (bdrv_reopen_set_read_only(bs, false, errp) != 0) {
            bs_read_only = false;
            goto fail;
        }
    }

    /* Prevent concurrent jobs trying to modify the graph structure here, we
     * already have our own plans. Also don't allow resize as the image size is
     * queried only at the job start and then cached. */
    s = block_job_create(job_id, &stream_job_driver, NULL, bs,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_GRAPH_MOD,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE,
                         speed, creation_flags, NULL, NULL, errp);
    if (!s) {
        goto fail;
    }

    /* Block all intermediate nodes between bs and base, because they will
     * disappear from the chain after this operation. The streaming job reads
     * every block only once, assuming that it doesn't change, so block writes
     * and resizes. */
    for (iter = backing_bs(bs); iter && iter != base; iter = backing_bs(iter)) {
        block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                           BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED,
                           &error_abort);
    }

    s->base = base;
    s->backing_file_str = g_strdup(backing_file_str);
    s->bs_read_only = bs_read_only;
    s->chain_frozen = true;

    s->on_error = on_error;
    trace_stream_start(bs, base, s);
    job_start(&s->common.job);
    return;

fail:
    if (bs_read_only) {
        bdrv_reopen_set_read_only(bs, true, NULL);
    }
    bdrv_unfreeze_backing_chain(bs, base);
}
Example #7
int main (int argc, char *argv[])
{
        HTTPMgmt *httpmgmt;
        HTTPStreaming *httpstreaming;
        GMainLoop *loop;
        GOptionContext *ctx;
        GError *err = NULL;
        gboolean foreground;
        struct rlimit rlim;
        GDateTime *datetime;
        gchar exe_path[512], *date;

        ctx = g_option_context_new (NULL);
        g_option_context_add_main_entries (ctx, options, NULL);
        g_option_context_add_group (ctx, gst_init_get_option_group ());
        if (!g_option_context_parse (ctx, &argc, &argv, &err)) {
                g_print ("Error initializing: %s\n", GST_STR_NULL (err->message));
                exit (1);
        }
        g_option_context_free (ctx);
        GST_DEBUG_CATEGORY_INIT (GSTREAMILL, "gstreamill", 0, "gstreamill log");

        if (version) {
                print_version_info ();
                exit (0);
        }

        /* stop gstreamill. */
        if (stop) {
                gchar *pid_str;
                gint pid;

                g_file_get_contents (PID_FILE, &pid_str, NULL, NULL);
                if (pid_str == NULL) {
                        g_print ("File %s not found, check if gstreamill is running.\n", PID_FILE);
                        exit (1);
                }
                pid = atoi (pid_str);
                g_free (pid_str);
                g_print ("stopping gstreamill with pid %d ...\n", pid);
                kill (pid, SIGTERM);
                exit (0);
        }

        /* readlink() the exe path before setuid; on CentOS, reading /proc/self/exe after setgid/setuid fails with permission denied */
        memset (exe_path, '\0', sizeof (exe_path));
        if (readlink ("/proc/self/exe", exe_path, sizeof (exe_path)) == -1) {
                g_print ("Read /proc/self/exe error: %s", g_strerror (errno));
                exit (2);
        }

        if (prepare_gstreamill_run_dir () != 0) {
                g_print ("Can't create gstreamill run directory\n");
                exit (3);
        }
/*
        if (set_user_and_group () != 0) {
                g_print ("set user and group failure\n");
                exit (4);
        }
*/
        if (job_file != NULL) {
                /* gstreamill command with job, run in foreground */
                foreground = TRUE;

        } else {
                /* gstreamill command without job, run in background */
                foreground = FALSE;
        }

        if (gst_debug_get_default_threshold () < GST_LEVEL_WARNING) {
                gst_debug_set_default_threshold (GST_LEVEL_WARNING);
        }

        /* initialize ts segment static plugin */
        if (!gst_plugin_register_static (GST_VERSION_MAJOR,
                                         GST_VERSION_MINOR,
                                         "tssegment",
                                         "ts segment plugin",
                                         ts_segment_plugin_init,
                                         "0.1.0",
                                         "GPL",
                                         "GStreamer",
                                         "GStreamer",
                                         "http://gstreamer.net/")) {
                GST_ERROR ("register tssegment error");
                exit (17);
        }

        /* subprocess, create_job_process */
        if (shm_name != NULL) {
                gint fd;
                gchar *job_desc, *p;
                Job *job;
                gchar *log_path, *name;
                gint ret;

                /* set subprocess maximum of core file */
                rlim.rlim_cur = 0;
                rlim.rlim_max = 0;
                if (setrlimit (RLIMIT_CORE, &rlim) == -1) {
                        GST_ERROR ("setrlimit error: %s", g_strerror (errno));
                }

                /* read job description from shared memory */
                job_desc = NULL;
                fd = shm_open (shm_name, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
                if (fd == -1) {
                        exit (5);
                }
                if (ftruncate (fd, job_length) == -1) {
                        exit (5);
                }
                p = mmap (NULL, job_length, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
                if (p == MAP_FAILED) {
                        exit (5);
                }
                job_desc = g_strdup (p);

                if ((job_desc != NULL) && (!jobdesc_is_valid (job_desc))) {
                        exit (6);
                }

                /* initialize log */
                name = (gchar *)jobdesc_get_name (job_desc);
                if (!jobdesc_is_live (job_desc)) {
                        gchar *p;

                        p = jobdesc_get_log_path (job_desc);
                        log_path = g_build_filename (p, "gstreamill.log", NULL);
                        g_free (p);

                } else {
                        log_path = g_build_filename (log_dir, name, "gstreamill.log", NULL);
                }
                ret = init_log (log_path);
                g_free (log_path);
                if (ret != 0) {
                        exit (7);
                }

                /* launch a job. */
                datetime = g_date_time_new_now_local ();
                date = g_date_time_format (datetime, "%b %d %H:%M:%S");
                fprintf (_log->log_hd, "\n*** %s : job %s starting ***\n\n", date, name);
                g_date_time_unref (datetime);
                g_free (date);
                job = job_new ("name", name, "job", job_desc, NULL);
                job->is_live = jobdesc_is_live (job_desc);
                job->eos = FALSE;
                loop = g_main_loop_new (NULL, FALSE);

                GST_INFO ("Initializing job ...");
                if (job_initialize (job, TRUE) != 0) {
                        GST_ERROR ("initialize job failure, exit");
                        exit (8);
                }
                GST_INFO ("Initializing job done");

                GST_INFO ("Initializing job's encoders output ...");
                if (job_encoders_output_initialize (job) != 0) {
                        GST_ERROR ("initialize job encoders' output failure, exit");
                        exit (8);
                }
                GST_INFO ("Initializing job's encoders output done");

                GST_INFO ("Starting job ...");
                if (job_start (job) != 0) {
                        GST_ERROR ("start job failure, exit");
                        exit (9);
                }
                datetime = g_date_time_new_now_local ();
                date = g_date_time_format (datetime, "%b %d %H:%M:%S");
                fprintf (_log->log_hd, "\n*** %s : job %s started ***\n\n", date, name);
                g_date_time_unref (datetime);
                g_free (date);
                g_free (name);
                g_free (job_desc);

                signal (SIGPIPE, SIG_IGN);
                signal (SIGUSR1, sighandler);
                signal (SIGTERM, stop_job);

                g_main_loop_run (loop);

        } else {
                /* set parent process maximum of core file */
                rlim.rlim_cur = RLIM_INFINITY;
                rlim.rlim_max = RLIM_INFINITY;
                if (setrlimit (RLIMIT_CORE, &rlim) == -1) {
                        GST_ERROR ("setrlimit error: %s", g_strerror (errno));
                }
        }

        /* run in background? */
        if (!foreground) {
                gchar *path;
                gint ret;

                /* pid file exist? */
                if (g_file_test (PID_FILE, G_FILE_TEST_EXISTS)) {
                        g_print ("file %s found, gstreamill already running !!!\n", PID_FILE);
                        exit (10);
                }

                /* media directories */
                path = g_strdup_printf ("%s/dvr", MEDIA_LOCATION);
                if (!g_file_test (path, G_FILE_TEST_EXISTS)) {
                        g_printf ("Create DVR directory: %s\n", path);
                        if (g_mkdir_with_parents (path, 0755) != 0) {
                                g_printf ("Failed to create DVR directory: %s\n", path);
                        }
                }
                g_free (path);
                path = g_strdup_printf ("%s/transcode/in", MEDIA_LOCATION);
                if (!g_file_test (path, G_FILE_TEST_EXISTS)) {
                        g_printf ("Create transcode directory: %s\n", path);
                        if (g_mkdir_with_parents (path, 0755) != 0) {
                                g_printf ("Failed to create transcode directory: %s\n", path);
                        }
                }
                g_free (path);
                path = g_strdup_printf ("%s/transcode/out", MEDIA_LOCATION);
                if (!g_file_test (path, G_FILE_TEST_EXISTS)) {
                        g_printf ("Create transcode directory: %s\n", path);
                        if (g_mkdir_with_parents (path, 0755) != 0) {
                                g_printf ("Failed to create transcode directory: %s\n", path);
                        }
                }
                g_free (path);

                /* log to file */
                path = g_build_filename (log_dir, "gstreamill.log", NULL);
                ret = init_log (path);
                g_free (path);
                if (ret != 0) {
                        g_print ("Init log error, ret %d.\n", ret);
                        exit (11);
                }

                /* daemonize */
                if (daemon (0, 0) != 0) {
                        fprintf (_log->log_hd, "Failed to daemonize\n");
                        remove_pid_file ();
                        exit (1);
                }

                /* create pid file */
                if (create_pid_file () != 0) {
                        exit (1);
                }

                /* customize signal */
                signal (SIGUSR1, sighandler);
                signal (SIGTERM, stop_gstreamill);

                datetime = g_date_time_new_now_local ();
                date = g_date_time_format (datetime, "%b %d %H:%M:%S");
                fprintf (_log->log_hd, "\n*** %s : gstreamill started ***\n\n", date);
                g_free (date);
                g_date_time_unref (datetime);
        }

        /* ignore SIGPIPE */
        signal (SIGPIPE, SIG_IGN);

        loop = g_main_loop_new (NULL, FALSE);

        /* gstreamill */
        gstreamill = gstreamill_new ("daemon", !foreground, "log_dir", log_dir, "exe_path", exe_path, NULL);
        if (gstreamill_start (gstreamill) != 0) {
                GST_ERROR ("start gstreamill error, exit.");
                remove_pid_file ();
                exit (12);
        }

        /* httpstreaming, pull */
        httpstreaming = httpstreaming_new ("gstreamill", gstreamill, "address", http_streaming, NULL);
        if (httpstreaming_start (httpstreaming, 10) != 0) {
                GST_ERROR ("start httpstreaming error, exit.");
                remove_pid_file ();
                exit (13);
        }

        if (!foreground) {
                /* run in background, management via http */
                httpmgmt = httpmgmt_new ("gstreamill", gstreamill, "address", http_mgmt, NULL);
                if (httpmgmt_start (httpmgmt) != 0) {
                        GST_ERROR ("start http management error, exit.");
                        remove_pid_file ();
                        exit (14);
                }

        } else {
                /* run in foreground, start job */
                gchar *job, *p, *result;
                JSON_Value *val;
                JSON_Object *obj;

                /* ctrl-c, stop gstreamill */
                signal (SIGINT, stop_gstreamill);

                /* ctrl-\, stop gstreamill */
                signal (SIGQUIT, stop_gstreamill);

                if (!g_file_get_contents (job_file, &job, NULL, NULL)) {
                        GST_ERROR ("Read job file %s error.", job_file);
                        exit (15);
                }
                p = gstreamill_job_start (gstreamill, job);
                val = json_parse_string (p);
                obj = json_value_get_object (val);
                result = (gchar *)json_object_get_string (obj, "result");
                GST_INFO ("start job result: %s.", result);
                if (g_strcmp0 (result, "success") != 0) {
                        exit (16);
                }
                json_value_free (val);
                g_free (p);
        }

        g_main_loop_run (loop);

        return 0;
}
Example #8
void commit_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, BlockDriverState *top,
                  int creation_flags, int64_t speed,
                  BlockdevOnError on_error, const char *backing_file_str,
                  const char *filter_node_name, Error **errp)
{
    CommitBlockJob *s;
    BlockDriverState *iter;
    BlockDriverState *commit_top_bs = NULL;
    Error *local_err = NULL;
    int ret;

    assert(top != bs);
    if (top == base) {
        error_setg(errp, "Invalid files for merge: top and base are the same");
        return;
    }

    s = block_job_create(job_id, &commit_job_driver, NULL, bs, 0, BLK_PERM_ALL,
                         speed, creation_flags, NULL, NULL, errp);
    if (!s) {
        return;
    }

    /* convert base to r/w, if necessary */
    s->base_read_only = bdrv_is_read_only(base);
    if (s->base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) != 0) {
            goto fail;
        }
    }

    /* Insert commit_top block node above top, so we can block consistent read
     * on the backing chain below it */
    commit_top_bs = bdrv_new_open_driver(&bdrv_commit_top, filter_node_name, 0,
                                         errp);
    if (commit_top_bs == NULL) {
        goto fail;
    }
    if (!filter_node_name) {
        commit_top_bs->implicit = true;
    }
    commit_top_bs->total_sectors = top->total_sectors;
    bdrv_set_aio_context(commit_top_bs, bdrv_get_aio_context(top));

    bdrv_set_backing_hd(commit_top_bs, top, &local_err);
    if (local_err) {
        bdrv_unref(commit_top_bs);
        commit_top_bs = NULL;
        error_propagate(errp, local_err);
        goto fail;
    }
    bdrv_replace_node(top, commit_top_bs, &local_err);
    if (local_err) {
        bdrv_unref(commit_top_bs);
        commit_top_bs = NULL;
        error_propagate(errp, local_err);
        goto fail;
    }

    s->commit_top_bs = commit_top_bs;
    bdrv_unref(commit_top_bs);

    /* Block all nodes between top and base, because they will
     * disappear from the chain after this operation. */
    assert(bdrv_chain_contains(top, base));
    for (iter = top; iter != base; iter = backing_bs(iter)) {
        /* XXX BLK_PERM_WRITE needs to be allowed so we don't block ourselves
         * at s->base (if writes are blocked for a node, they are also blocked
         * for its backing file). The other options would be a second filter
         * driver above s->base. */
        ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                 BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                 errp);
        if (ret < 0) {
            goto fail;
        }
    }

    if (bdrv_freeze_backing_chain(commit_top_bs, base, errp) < 0) {
        goto fail;
    }
    s->chain_frozen = true;

    ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp);
    if (ret < 0) {
        goto fail;
    }

    s->base = blk_new(BLK_PERM_CONSISTENT_READ
                      | BLK_PERM_WRITE
                      | BLK_PERM_RESIZE,
                      BLK_PERM_CONSISTENT_READ
                      | BLK_PERM_GRAPH_MOD
                      | BLK_PERM_WRITE_UNCHANGED);
    ret = blk_insert_bs(s->base, base, errp);
    if (ret < 0) {
        goto fail;
    }
    s->base_bs = base;

    /* Required permissions are already taken with block_job_add_bdrv() */
    s->top = blk_new(0, BLK_PERM_ALL);
    ret = blk_insert_bs(s->top, top, errp);
    if (ret < 0) {
        goto fail;
    }

    s->backing_file_str = g_strdup(backing_file_str);
    s->on_error = on_error;

    trace_commit_start(bs, base, top, s);
    job_start(&s->common.job);
    return;

fail:
    if (s->chain_frozen) {
        bdrv_unfreeze_backing_chain(commit_top_bs, base);
    }
    if (s->base) {
        blk_unref(s->base);
    }
    if (s->top) {
        blk_unref(s->top);
    }
    job_early_fail(&s->common.job);
    /* commit_top_bs has to be replaced after deleting the block job,
     * otherwise this would fail because of lack of permissions. */
    if (commit_top_bs) {
        bdrv_replace_node(commit_top_bs, top, &error_abort);
    }
}
Example #9
/**
 * gstreamill_job_start:
 * @gstreamill: (in): the Gstreamill instance.
 * @job_desc: (in): JSON job description.
 *
 * Returns: job execution result string.
 */
gchar * gstreamill_job_start (Gstreamill *gstreamill, gchar *job_desc)
{
        gchar *p, *name;
        Job *job;

        if (!jobdesc_is_valid (job_desc)) {
                p = g_strdup ("Invalid job");
                return p;
        }

        if (jobdesc_is_live (job_desc)) {
                GST_ERROR ("live job arrived");

        } else {
                GST_ERROR ("transcode job arrived");
        }

        /* create job object */
        name = jobdesc_get_name (job_desc);
        if (get_job (gstreamill, name) != NULL) {
                GST_ERROR ("start job failure, duplicated name %s.", name);
                p = g_strdup_printf ("start job failure, duplicated name %s.", name);
                g_free (name);
                return p;
        }
        job = job_new ("job", job_desc, "name", name, NULL);
        g_free (name);

        /* job initialize */
        job->log_dir = gstreamill->log_dir;
        g_mutex_init (&(job->access_mutex));
        job->is_live = jobdesc_is_live (job_desc);
        job->eos = FALSE;
        job->current_access = 0;
        job->age = 0;
        job->last_start_time = NULL;
        if (job_initialize (job, gstreamill->daemon) != 0) {
                p = g_strdup ("initialize job failure");
                g_object_unref (job);
                return p;
        }

        /* reset and start job */
        job_reset (job);
        if (gstreamill->daemon) {
                p = create_job_process (job);
                GST_ERROR ("%s: %s", p, job->name);
                if (g_str_has_suffix (p, "success")) {
                        g_mutex_lock (&(gstreamill->job_list_mutex));
                        gstreamill->job_list = g_slist_append (gstreamill->job_list, job);
                        g_mutex_unlock (&(gstreamill->job_list_mutex));

                } else {
                        g_object_unref (job);
                }

        } else {
                if (job_start (job) == 0) {
                        g_mutex_lock (&(gstreamill->job_list_mutex));
                        gstreamill->job_list = g_slist_append (gstreamill->job_list, job);
                        g_mutex_unlock (&(gstreamill->job_list_mutex));
                        p = g_strdup ("success");

                } else {
                        p = g_strdup ("failure");
                }
        }

        return p;
}