/*!
 * \brief Allocate a reference-counted datastore for the given info.
 *
 * \param info Datastore type information; must be non-NULL.
 * \param uid  Caller-chosen identifier; when NULL/empty a UUID is generated.
 *
 * \return New datastore (caller owns one ao2 reference), or NULL on
 *         bad arguments or allocation failure.
 */
struct ast_datastore *ast_datastores_alloc_datastore(const struct ast_datastore_info *info, const char *uid)
{
	struct ast_datastore *datastore;
	char generated[AST_UUID_STR_LEN];
	const char *chosen_uid;

	if (!info) {
		return NULL;
	}

	datastore = ao2_alloc(sizeof(*datastore), datastore_destroy);
	if (!datastore) {
		return NULL;
	}
	datastore->info = info;

	/* Fall back to a freshly generated UUID when the caller supplied no id */
	chosen_uid = ast_strlen_zero(uid)
		? ast_uuid_generate_str(generated, sizeof(generated))
		: uid;

	datastore->uid = ast_strdup(chosen_uid);
	if (!datastore->uid) {
		/* Drop our only reference; destructor handles partial state */
		ao2_ref(datastore, -1);
		return NULL;
	}

	return datastore;
}
/*!
 * \brief Create a subscription to a topic, optionally with a dedicated mailbox.
 *
 * \param topic           Topic to subscribe to; NULL is rejected.
 * \param callback        Invoked for each message delivered to the subscription.
 * \param data            Opaque pointer handed back to \a callback.
 * \param needs_mailbox   Nonzero to create a serializing taskprocessor for delivery.
 * \param use_thread_pool Nonzero to back the mailbox with the shared thread pool
 *                        instead of a dedicated thread.
 *
 * \return New subscription (caller owns a reference), or NULL on error.
 */
struct stasis_subscription *internal_stasis_subscribe(
	struct stasis_topic *topic,
	stasis_subscription_cb callback,
	void *data,
	int needs_mailbox,
	int use_thread_pool)
{
	RAII_VAR(struct stasis_subscription *, sub, NULL, ao2_cleanup);

	if (!topic) {
		return NULL;
	}

	/* The ao2 lock is used for join_cond. */
	sub = ao2_t_alloc(sizeof(*sub), subscription_dtor, stasis_topic_name(topic));
	if (!sub) {
		return NULL;
	}
	ast_uuid_generate_str(sub->uniqueid, sizeof(sub->uniqueid));

	/*
	 * BUGFIX: initialize join_cond before any early-return path below.
	 * On failure the RAII cleanup runs subscription_dtor, and destroying
	 * an uninitialized condition variable is undefined behavior.
	 */
	ast_cond_init(&sub->join_cond, NULL);

	if (needs_mailbox) {
		char tps_name[AST_TASKPROCESSOR_MAX_NAME + 1];

		/* Create name with seq number appended. */
		ast_taskprocessor_build_name(tps_name, sizeof(tps_name), "sub%c:%s",
			use_thread_pool ? 'p' : 'm',
			stasis_topic_name(topic));

		/*
		 * With a small number of subscribers, a thread-per-sub is
		 * acceptable. For a large number of subscribers, a thread
		 * pool should be used.
		 */
		if (use_thread_pool) {
			sub->mailbox = ast_threadpool_serializer(tps_name, pool);
		} else {
			sub->mailbox = ast_taskprocessor_get(tps_name, TPS_REF_DEFAULT);
		}
		if (!sub->mailbox) {
			return NULL;
		}
		ast_taskprocessor_set_local(sub->mailbox, sub);
		/* Taskprocessor has a reference */
		ao2_ref(sub, +1);
	}

	ao2_ref(topic, +1);
	sub->topic = topic;
	sub->callback = callback;
	sub->data = data;

	if (topic_add_subscription(topic, sub) != 0) {
		return NULL;
	}
	send_subscription_subscribe(topic, sub);

	/* Bump for the reference we return; RAII releases the working ref */
	ao2_ref(sub, +1);
	return sub;
}
/*!
 * \brief Create a subscription to a topic, optionally with a dedicated mailbox.
 *
 * \param topic           Topic to subscribe to; NULL is rejected.
 * \param callback        Invoked for each message delivered to the subscription.
 * \param data            Opaque pointer handed back to \a callback.
 * \param needs_mailbox   Nonzero to create a serializing taskprocessor for delivery.
 * \param use_thread_pool Nonzero to back the mailbox with the shared thread pool
 *                        instead of a dedicated thread.
 *
 * \return New subscription (caller owns a reference), or NULL on error.
 */
struct stasis_subscription *internal_stasis_subscribe(
	struct stasis_topic *topic,
	stasis_subscription_cb callback,
	void *data,
	int needs_mailbox,
	int use_thread_pool)
{
	RAII_VAR(struct stasis_subscription *, sub, NULL, ao2_cleanup);

	if (!topic) {
		return NULL;
	}

	/* The ao2 lock is used for join_cond. */
	sub = ao2_t_alloc(sizeof(*sub), subscription_dtor, topic->name);
	if (!sub) {
		return NULL;
	}
	ast_uuid_generate_str(sub->uniqueid, sizeof(sub->uniqueid));

	/*
	 * BUGFIX: initialize join_cond before any early-return path below.
	 * On failure the RAII cleanup runs subscription_dtor, and destroying
	 * an uninitialized condition variable is undefined behavior.
	 */
	ast_cond_init(&sub->join_cond, NULL);

	if (needs_mailbox) {
		/* With a small number of subscribers, a thread-per-sub is
		 * acceptable. For larger number of subscribers, a thread
		 * pool should be used.
		 */
		if (use_thread_pool) {
			sub->mailbox = ast_threadpool_serializer(sub->uniqueid, pool);
		} else {
			sub->mailbox = ast_taskprocessor_get(sub->uniqueid, TPS_REF_DEFAULT);
		}
		if (!sub->mailbox) {
			return NULL;
		}
		ast_taskprocessor_set_local(sub->mailbox, sub);
		/* Taskprocessor has a reference */
		ao2_ref(sub, +1);
	}

	ao2_ref(topic, +1);
	sub->topic = topic;
	sub->callback = callback;
	sub->data = data;

	if (topic_add_subscription(topic, sub) != 0) {
		return NULL;
	}
	send_subscription_subscribe(topic, sub);

	/* Bump for the reference we return; RAII releases the working ref */
	ao2_ref(sub, +1);
	return sub;
}
/*!
 * \brief Tear down a session's channel, honoring any redirect in the message.
 *
 * If \a message carries redirect target info (endpoint + app) the session is
 * either re-offered internally to the new target or forwarded via the core,
 * per the endpoint's configured redirect mode.  Otherwise (or if the re-offer
 * succeeds, or on forward) the channel is queued for hangup.
 *
 * \param session The session being ended; its channel may already be gone.
 * \param status  Reason the session ended; mapped to a hangup cause.
 * \param message The triggering message, queried for redirect details.
 *
 * \return Always 0.
 */
static int end_session(struct respoke_session *session, enum respoke_status status, const struct respoke_message *message)
{
	const char *endpoint, *type, *app;

	/* Nothing to hang up if the channel is already gone */
	if (!session->channel) {
		return 0;
	}

	/* NOTE(review): these getters presumably tolerate a message without
	 * redirect info by returning NULL/empty — confirm against their impl. */
	endpoint = respoke_message_redirected_endpoint_get(message);
	type = respoke_message_redirected_type_get(message);
	app = respoke_message_redirected_app_get(message);

	/* A redirect requires both a target endpoint and an app */
	if (!ast_strlen_zero(endpoint) && !ast_strlen_zero(app)) {
		if (session->endpoint->redirect == RESPOKE_REDIRECT_INTERNAL) {
			char id[AST_UUID_STR_LEN];

			/* Since we are changing the session-id and because offer adds this session
			 * to the sessions container we need to unlink it now, which bye does */
			respoke_session_bye(session, RESPOKE_STATUS_REDIRECTING);
			/* Clear terminated so the re-offer below is not rejected */
			session->terminated = 0;
			/* Retarget the session at the redirect destination; type
			 * defaults to "web" when the message did not specify one */
			ast_string_field_set(session, remote, endpoint);
			ast_string_field_set(session, remote_type, S_OR(type, "web"));
			ast_string_field_set(session, remote_appid, app);
			ast_string_field_set(session, remote_connection, "");
			ast_string_field_set(session, session_id, ast_uuid_generate_str(id, sizeof(id)));

			/* Send a new offer out to the new target, without the caller being any the wiser */
			if (!respoke_session_offer(session)) {
				/* Redirect succeeded: keep the channel up, skip the hangup below */
				return 0;
			}
			/* Offer failed; mark terminated again and fall through to hangup */
			session->terminated = 1;
		} else if (session->endpoint->redirect == RESPOKE_REDIRECT_CORE) {
			/* Let the Asterisk core perform the call forward instead */
			ast_channel_call_forward_build(session->channel, "Respoke/%s/%s@%s",
				ast_sorcery_object_get_id(session->endpoint), endpoint, app);
		}
	}

	ast_set_hangupsource(session->channel, ast_channel_name(session->channel), 0);

	/* Only supply a cause if one has not already been set on the channel */
	if (!ast_channel_hangupcause(session->channel)) {
		int cause = hangup_reason_to_cause(status);

		ast_queue_hangup_with_cause(session->channel, cause);
	} else {
		ast_queue_hangup(session->channel);
	}

	return 0;
}