Code example #1
0
/********************************************************************
 * FUNCTION set_config_path
 *
 *  Change or set the config path prompt
 *
 * INPUTS:
 *    session_cb == session control block to use
 *
 * RETURNS:
 *   status
 *********************************************************************/
static status_t
    set_config_path (session_cb_t *session_cb)
{
    status_t res = NO_ERR;

    /* discard any previous prompt path string before rebuilding it */
    m__free(session_cb->config_path);
    session_cb->config_path = NULL;

    /* prefer the current edit value node; fall back to the current
     * schema object when no value node is active; otherwise leave
     * the path NULL and return NO_ERR
     */
    if (session_cb->config_ecurval != NULL) {
        /* full prompt mode selects the verbose CLI instance format */
        ncx_instfmt_t format =
            (session_cb->prompt_type == HELP_MODE_FULL)
            ? NCX_IFMT_CLI : NCX_IFMT_CLI2;
        res = val_gen_instance_id(NULL, session_cb->config_ecurval,
                                  format, &session_cb->config_path);
    } else if (session_cb->config_curobj != NULL) {
        res = (session_cb->prompt_type == HELP_MODE_FULL)
            ? obj_gen_object_id_xpath(session_cb->config_curobj,
                                      &session_cb->config_path)
            : obj_gen_object_id(session_cb->config_curobj,
                                &session_cb->config_path);
    }

    return res;

}  /* set_config_path */
Code example #2
0
File: agt_plock.c  Project: 0xDEC0DE8/OpenYuma
/********************************************************************
* FUNCTION y_ietf_netconf_partial_lock_partial_lock_invoke
* 
* RPC invocation phase
* All constraints have passed at this point.
* Call device instrumentation code in this function.
* 
* INPUTS:
*     see agt/agt_rpc.h for details
* 
* RETURNS:
*     error status
********************************************************************/
static status_t
    y_ietf_netconf_partial_lock_partial_lock_invoke (
        ses_cb_t *scb,
        rpc_msg_t *msg,
        xml_node_t *methnode)
{
    plock_cb_t *plcb;
    val_value_t *testval, *newval;
    xpath_result_t *result;
    xpath_resnode_t *resnode, *clearnode;
    xmlChar *pathbuff;
    cfg_template_t *running;
    status_t res;
    ncx_num_t num;

    res = NO_ERR;

    /* rpc_user1 carries the partial-lock control block; presumably
     * stashed there by the validate-phase callback — verify against
     * the matching _validate function */
    plcb = (plock_cb_t *)msg->rpc_user1;
    result = plock_get_final_result(plcb);
    running = cfg_get_config_id(NCX_CFGID_RUNNING);

    /* try to lock all the target nodes; the loop stops at the first
     * failure, leaving 'resnode' pointing at the node that failed */
    for (resnode = xpath_get_first_resnode(result);
         resnode != NULL && res == NO_ERR;
         resnode = xpath_get_next_resnode(resnode)) {

        testval = xpath_get_resnode_valptr(resnode);

        res = val_set_partial_lock(testval, plcb);
        if (res != NO_ERR) {
            agt_record_error(scb,
                             &msg->mhdr,
                             NCX_LAYER_OPERATION,
                             res,
                             methnode,
                             NCX_NT_NONE,
                             NULL,
                             NCX_NT_VAL,
                             testval);
        }
    }

    /* unlock any nodes already attempted if any fail;
     * iteration stops once the failing node ('resnode') itself
     * has been cleared, so nodes never locked are not touched.
     * NOTE(review): plcb is NOT freed on this path, unlike the
     * cfg_add_partial_lock failure path below — presumably the
     * framework's free callback reclaims it; confirm */
    if (res != NO_ERR) {
        for (clearnode = xpath_get_first_resnode(result);
             clearnode != NULL;
             clearnode = xpath_get_next_resnode(clearnode)) {

            testval = xpath_get_resnode_valptr(clearnode);

            val_clear_partial_lock(testval, plcb);
            if (clearnode == resnode) {
                return res;
            }
        }
        return res;
    }

    /* add this partial lock to the running config */
    res = cfg_add_partial_lock(running, plcb);
    if (res != NO_ERR) {
        /* should not happen since config lock state could
         * not have changed since validate callback
         */
        agt_record_error(scb,
                         &msg->mhdr,
                         NCX_LAYER_OPERATION,
                         res,
                         methnode,
                         NCX_NT_NONE,
                         NULL,
                         NCX_NT_NONE,
                         NULL);
        /* full rollback: every node was locked above, so clear all */
        for (clearnode = xpath_get_first_resnode(result);
             clearnode != NULL;
             clearnode = xpath_get_next_resnode(clearnode)) {

            testval = xpath_get_resnode_valptr(clearnode);
            val_clear_partial_lock(testval, plcb);
        }
        plock_cb_free(plcb);
        return res;
    }

    /* setup return data only if lock successful
     * cache the reply instead of stream the reply
     * in case there is any error; if so; the partial
     * lock will be backed out and the dataQ cleaned
     * for an error exit; add lock-id leaf first 
     */
    msg->rpc_data_type = RPC_DATA_YANG;
    
    ncx_init_num(&num);
    num.u = plock_get_id(plcb);
    newval = xml_val_new_number
        (y_ietf_netconf_partial_lock_N_lock_id,
         val_get_nsid(msg->rpc_input),
         &num,
         NCX_BT_UINT32);
    ncx_clean_num(NCX_BT_UINT32, &num);

    if (newval == NULL) {
        res = ERR_INTERNAL_MEM;
        agt_record_error(scb, 
                         &msg->mhdr, 
                         NCX_LAYER_OPERATION, 
                         res,
                         methnode, 
                         NCX_NT_NONE, 
                         NULL, 
                         NCX_NT_NONE, 
                         NULL);
    } else {
        /* ownership of newval passes to the reply data queue */
        dlq_enque(newval, &msg->rpc_dataQ);
    }

    /* add lock-node leaf-list instance for each resnode */
    for (resnode = xpath_get_first_resnode(result);
         resnode != NULL && res == NO_ERR;
         resnode = xpath_get_next_resnode(resnode)) {

        /* Q&D method: generate the i-i string as a plain
         * string and add any needed prefixes to the global
         * prefix map for the reply message (in mhdr)
         */
        pathbuff = NULL;
        testval = xpath_get_resnode_valptr(resnode);
        res = val_gen_instance_id(&msg->mhdr, 
                                  testval,
                                  NCX_IFMT_XPATH1, 
                                  &pathbuff);
        if (res == NO_ERR) {
            /* make leaf; pass off pathbuff malloced memory */
            newval = xml_val_new_string
                (y_ietf_netconf_partial_lock_N_locked_node,
                 val_get_nsid(msg->rpc_input),
                 pathbuff);
            if (newval == NULL) {
                /* leaf creation failed, so ownership of pathbuff
                 * was never transferred; free it here */
                res = ERR_INTERNAL_MEM;
                m__free(pathbuff);
                pathbuff = NULL;
            }
        }

        if (res == NO_ERR) {
            dlq_enque(newval, &msg->rpc_dataQ);
        } else {
            agt_record_error(scb, 
                             &msg->mhdr, 
                             NCX_LAYER_OPERATION, 
                             res,
                             methnode, 
                             NCX_NT_NONE, 
                             NULL, 
                             NCX_NT_NONE, 
                             NULL);
        }
    }

    if (res != NO_ERR) {
        /* back out everything, except waste the lock ID */
        for (clearnode = xpath_get_first_resnode(result);
             clearnode != NULL;
             clearnode = xpath_get_next_resnode(clearnode)) {

            testval = xpath_get_resnode_valptr(clearnode);
            val_clear_partial_lock(testval, plcb);
        }
        cfg_delete_partial_lock(running, plock_get_id(plcb));

        /* clear any data already queued */
        while (!dlq_empty(&msg->rpc_dataQ)) {
            testval = (val_value_t *)
                dlq_deque(&msg->rpc_dataQ);
            val_free_value(testval);
        }
        msg->rpc_data_type = RPC_DATA_NONE;
    }

    return res;

} /* y_ietf_netconf_partial_lock_partial_lock_invoke */