Example no. 1
/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

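    /* Convert every block still imported through us into a local copy
     * so that it outlives this import */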
    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks);
    pa_hashmap_free(i->segments);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}
Example no. 2
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, b->length);

    seg = b->per_type.imported.segment;
    pa_assert(seg);
    pa_assert(seg->import);

    pa_mutex_lock(seg->import->mutex);

    pa_hashmap_remove(
            seg->import->blocks,
            PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

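    /* Drop the segment's block count; if this was its last block,
     * release the import lock before detaching the segment */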
    if (-- seg->n_blocks <= 0) {
        pa_mutex_unlock(seg->import->mutex);
        segment_detach(seg);
    } else
        pa_mutex_unlock(seg->import->mutex);
}
Example no. 3
void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
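    /* Release every slot still in use; the slot id is its offset
     * within the slots array */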
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}
Example no. 4
int pa_asyncmsgq_send(pa_asyncmsgq *a, pa_msgobject *object, int code, const void *userdata, int64_t offset, const pa_memchunk *chunk) {
    struct asyncmsgq_item i;
    pa_assert(PA_REFCNT_VALUE(a) > 0);

    i.code = code;
    i.object = object;
    i.userdata = (void*) userdata;
    i.free_cb = NULL;
    i.ret = -1;
    i.offset = offset;
    if (chunk) {
        pa_assert(chunk->memblock);
        i.memchunk = *chunk;
    } else
        pa_memchunk_reset(&i.memchunk);

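    /* Reuse a semaphore from the static free list if one is available,
     * otherwise allocate a fresh one */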
    if (!(i.semaphore = pa_flist_pop(PA_STATIC_FLIST_GET(semaphores))))
        i.semaphore = pa_semaphore_new(0);

    /* This mutex makes the queue multiple-writer safe. This lock is only used on the writing side */
    pa_mutex_lock(a->mutex);
    pa_assert_se(pa_asyncq_push(a->asyncq, &i, true) == 0);
    pa_mutex_unlock(a->mutex);

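    /* Block until the reading side has dispatched the message and
     * posted the semaphore */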
    pa_semaphore_wait(i.semaphore);

    if (pa_flist_push(PA_STATIC_FLIST_GET(semaphores), i.semaphore) < 0)
        pa_semaphore_free(i.semaphore);

    return i.ret;
}
Example no. 5
void pa_asyncmsgq_post(pa_asyncmsgq *a, pa_msgobject *object, int code, const void *userdata, int64_t offset, const pa_memchunk *chunk, pa_free_cb_t free_cb) {
    struct asyncmsgq_item *i;
    pa_assert(PA_REFCNT_VALUE(a) > 0);

    if (!(i = pa_flist_pop(PA_STATIC_FLIST_GET(asyncmsgq))))
        i = pa_xnew(struct asyncmsgq_item, 1);

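    /* Fill in the item; take references on the object and the memblock
     * so that both stay alive until the message is dispatched */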
    i->code = code;
    i->object = object ? pa_msgobject_ref(object) : NULL;
    i->userdata = (void*) userdata;
    i->free_cb = free_cb;
    i->offset = offset;
    if (chunk) {
        pa_assert(chunk->memblock);
        i->memchunk = *chunk;
        pa_memblock_ref(i->memchunk.memblock);
    } else
        pa_memchunk_reset(&i->memchunk);
    i->semaphore = NULL;

    /* This mutex makes the queue multiple-writer safe. This lock is only used on the writing side */
    pa_mutex_lock(a->mutex);
    pa_asyncq_post(a->asyncq, i);
    pa_mutex_unlock(a->mutex);
}
Example no. 6
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

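    /* Freeing an import or export unlinks it from the pool's list,
     * so both loops terminate once the lists are empty */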
    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
/*         raise(SIGTRAP);  */
        pa_log_warn("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
Example no. 7
static void thread_func(void *data) {
    pa_tls_set(tls, data);

    pa_log_info("thread_func() for %s starting...", (char*) pa_tls_get(tls));

    pa_mutex_lock(mutex);

    for (;;) {
        int k, n;

        pa_log_info("%s waiting ...", (char*) pa_tls_get(tls));

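        /* Wait until another thread publishes a new magic number;
         * a negative value is the signal to quit */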
        for (;;) {

            if (magic_number < 0)
                goto quit;

            if (magic_number != 0)
                break;

            pa_cond_wait(cond1, mutex);
        }

        k = magic_number;
        magic_number = 0;

        pa_mutex_unlock(mutex);

        pa_run_once(&once, once_func);

        pa_cond_signal(cond2, 0);

        pa_log_info("%s got number %i", (char*) pa_tls_get(tls), k);

        /* Spin! */
        for (n = 0; n < k; n++)
            pa_thread_yield();

        pa_mutex_lock(mutex);
    }

quit:

    pa_mutex_unlock(mutex);

    pa_log_info("thread_func() for %s done...", (char*) pa_tls_get(tls));
}
Example no. 8
void pa_threaded_mainloop_unlock(pa_threaded_mainloop *m) {
    pa_assert(m);

    /* Make sure that this function is not called from the helper thread */
    pa_assert(!m->thread || !pa_thread_is_running(m->thread) || !in_worker(m));

    pa_mutex_unlock(m->mutex);
}
Example no. 9
void pa_aupdate_write_end(pa_aupdate *a) {
    pa_assert(a);

    if (!a->swapped)
        pa_aupdate_write_swap(a);

    pa_mutex_unlock(a->write_lock);
}
Example no. 10
bool pa_ratelimit_test(pa_ratelimit *r, pa_log_level_t t) {
    pa_usec_t now;
    pa_mutex *m;

    now = pa_rtclock_now();

    m = pa_static_mutex_get(&mutex, false, false);
    pa_mutex_lock(m);

    pa_assert(r);
    pa_assert(r->interval > 0);
    pa_assert(r->burst > 0);

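    /* Start a new interval if this is the first event or the current
     * interval has already elapsed */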
    if (r->begin <= 0 ||
        r->begin + r->interval < now) {

        if (r->n_missed > 0)
            pa_logl(t, "%u events suppressed", r->n_missed);

        r->begin = now;

        /* Reset counters */
        r->n_printed = 0;
        r->n_missed = 0;
        goto good;
    }

    if (r->n_printed <= r->burst)
        goto good;

    r->n_missed++;
    pa_mutex_unlock(m);
    return false;

good:
    r->n_printed++;
    pa_mutex_unlock(m);
    return true;
}
Example no. 11
/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

/*     pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
Example no. 12
static void unref(pa_bool_t after_fork) {

    pa_assert(n_ref > 0);
    pa_assert(pipe_fd[0] >= 0);
    pa_assert(pipe_fd[1] >= 0);
    pa_assert(lock_fd_mutex);

    n_ref--;

    if (n_ref > 0)
        return;

    if (thread) {
        pa_thread_free(thread);
        thread = NULL;
    }

    pa_mutex_lock(lock_fd_mutex);

    pa_assert(state != STATE_TAKEN);

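    /* If we still own the autospawn lock, release it; right after a
     * fork() we merely close our copy of the file descriptor */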
    if (state == STATE_OWNING) {

        pa_assert(lock_fd >= 0);

        if (after_fork)
            pa_close(lock_fd);
        else {
            char *lf;

            if (!(lf = pa_runtime_path(AUTOSPAWN_LOCK)))
                pa_log_warn(_("Cannot access autospawn lock."));

            pa_unlock_lockfile(lf, lock_fd);
            pa_xfree(lf);
        }
    }

    lock_fd = -1;
    state = STATE_IDLE;

    pa_mutex_unlock(lock_fd_mutex);

    pa_mutex_free(lock_fd_mutex);
    lock_fd_mutex = NULL;

    pa_close(pipe_fd[0]);
    pa_close(pipe_fd[1]);
    pipe_fd[0] = pipe_fd[1] = -1;
}
Example no. 13
/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

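    /* If we already know a block by this id, simply bump its reference
     * count and return it */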
    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
        pa_memblock_ref(b);
        goto finish;
    }

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

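    /* Map the shared memory segment if we haven't attached it yet */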
    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = true;
    b->is_silence = false;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}
Example no. 14
void pa_threaded_mainloop_stop(pa_threaded_mainloop *m) {
    pa_assert(m);

    if (!m->thread || !pa_thread_is_running(m->thread))
        return;

    /* Make sure that this function is not called from the helper thread */
    pa_assert(!in_worker(m));

    pa_mutex_lock(m->mutex);
    pa_mainloop_quit(m->real_mainloop, 0);
    pa_mutex_unlock(m->mutex);

    pa_thread_join(m->thread);
}
Example no. 15
static void thread_func(void *u) {
    int fd;
    char *lf;
    sigset_t fullset;

    /* No signals in this thread please */
    sigfillset(&fullset);
    pthread_sigmask(SIG_BLOCK, &fullset, NULL);

    if (!(lf = pa_runtime_path(AUTOSPAWN_LOCK))) {
        pa_log_warn(_("Cannot access autospawn lock."));
        goto fail;
    }

    if ((fd = pa_lock_lockfile(lf)) < 0)
        goto fail;

    pa_mutex_lock(lock_fd_mutex);
    pa_assert(state == STATE_IDLE);
    lock_fd = fd;
    state = STATE_OWNING;
    pa_mutex_unlock(lock_fd_mutex);

    goto finish;

fail:
    pa_mutex_lock(lock_fd_mutex);
    pa_assert(state == STATE_IDLE);
    state = STATE_FAILED;
    pa_mutex_unlock(lock_fd_mutex);

finish:
    pa_xfree(lf);

    ping();
}
Example no. 16
static int poll_func(struct pollfd *ufds, unsigned long nfds, int timeout, void *userdata) {
    pa_mutex *mutex = userdata;
    int r;

    pa_assert(mutex);

    /* Before entering poll() we unlock the mutex, so that
     * avahi_simple_poll_quit() can succeed from another thread. */

    pa_mutex_unlock(mutex);
    r = pa_poll(ufds, nfds, timeout);
    pa_mutex_lock(mutex);

    return r;
}
Example no. 17
static void thread(void *userdata) {
    pa_threaded_mainloop *m = userdata;

#ifndef OS_IS_WIN32
    sigset_t mask;

    /* Make sure that signals are delivered to the main thread */
    sigfillset(&mask);
    pthread_sigmask(SIG_BLOCK, &mask, NULL);
#endif

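    /* Run the main loop with the lock held; the lock is dropped only
     * inside the poll function while waiting for events */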
    pa_mutex_lock(m->mutex);

    pa_mainloop_run(m->real_mainloop, NULL);

    pa_mutex_unlock(m->mutex);
}
Example no. 18
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}
Example no. 19
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(true, true);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}
Example no. 20
/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

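        /* Skip slots whose block was not imported through this import */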
        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}
Example no. 21
int pa_cond_wait(pa_cond *c, pa_mutex *m) {
    HANDLE event;

    assert(c);
    assert(m);

    event = CreateEvent(NULL, FALSE, FALSE, NULL);
    assert(event);

    pa_hashmap_put(c->wait_events, event, event);

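    /* Drop the mutex while waiting on our private event object, then
     * re-acquire it before returning, as condition variable semantics
     * require */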
    pa_mutex_unlock(m);

    WaitForSingleObject(event, INFINITE);

    pa_mutex_lock(m);

    pa_hashmap_remove(c->wait_events, event);

    CloseHandle(event);

    return 0;
}
Example no. 22
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(true, true);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);
    return e;
}
Example no. 23
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED :
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_IMPORTED : {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            pa_assert(segment);
            import = segment->import;
            pa_assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            int call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            pa_assert(slot);

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
Example no. 24
static void memblock_free(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;

        case PA_MEMBLOCK_APPENDED:

            /* We could attach it to unused_memblocks, but that would
             * probably waste some considerable amount of memory */
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            bool call_free;

            pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
Example no. 25
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {

        /* Ouch, somebody is retaining a memory block reference! */

#ifdef DEBUG_REF
        unsigned i;
        pa_flist *list;

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;

                if (b == k)
                    break;
            }

            if (!k)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

#endif

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));

/*         PA_DEBUG_TRAP; */
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}