void shm_event_raise(long used, long size, long perc)
{
    evi_params_p list = 0;

    *event_shm_pending = 1;
    *event_shm_last = perc;

    // event has to be triggered - check for subscribers
    if (!evi_probe_event(EVI_SHM_THRESHOLD_ID)) {
        goto end;
    }

    if (!(list = evi_get_params()))
        goto end;
    if (evi_param_add_int(list, &shm_usage_str, (int *)&perc)) {
        LM_ERR("unable to add usage parameter\n");
        goto end;
    }
    if (evi_param_add_int(list, &shm_threshold_str,
                (int *)&event_shm_threshold)) {
        LM_ERR("unable to add threshold parameter\n");
        goto end;
    }
    if (evi_param_add_int(list, &shm_used_str, (int *)&used)) {
        LM_ERR("unable to add used parameter\n");
        goto end;
    }
    if (evi_param_add_int(list, &shm_size_str, (int *)&size)) {
        LM_ERR("unable to add size parameter\n");
        goto end;
    }

    /*
     * event has to be raised without the lock otherwise a deadlock will be
     * generated by the transport modules, or by the event_route processing
     */
#ifdef HP_MALLOC
    shm_unlock(0);
#else
    shm_unlock();
#endif

    if (evi_raise_event(EVI_SHM_THRESHOLD_ID, list)) {
        LM_ERR("unable to send shm threshold event\n");
    }

#ifdef HP_MALLOC
    shm_lock(0);
#else
    shm_lock();
#endif

    list = 0;
end:
    if (list)
        evi_free_params(list);
    *event_shm_pending = 0;
}
/**
 * Clean up on exit. This should be called before exiting.
 * \param show_status set to one to display the mem status
 */
void cleanup(int show_status)
{
    LM_INFO("cleanup\n");

    /* clean-up */

    /* hack: force-unlock the shared memory lock(s) in case some process
     * crashed and left them locked; this allows an almost graceful
     * shutdown */
    if (mem_lock)
#ifdef HP_MALLOC
    {
        int i;

        for (i = 0; i < HP_HASH_SIZE; i++)
            shm_unlock(i);
    }
#else
        shm_unlock();
#endif

    handle_ql_shutdown();
    destroy_modules();
#ifdef USE_TCP
    destroy_tcp();
#endif
#ifdef USE_TLS
    destroy_tls();
#endif
    destroy_timer();
    destroy_stats_collector();
    destroy_script_cb();
    pv_free_extra_list();
    destroy_argv_list();
    destroy_black_lists();

#ifdef PKG_MALLOC
    if (show_status) {
        LM_GEN1(memdump, "Memory status (pkg):\n");
        pkg_status();
    }
#endif
#ifdef SHM_MEM
    cleanup_debug();

    if (pt)
        shm_free(pt);
    pt = 0;
    if (show_status) {
        LM_GEN1(memdump, "Memory status (shm):\n");
        shm_status();
    }
    /* zero all shmem alloc vars that we still use */
    shm_mem_destroy();
#endif
    if (pid_file)
        unlink(pid_file);
    if (pgid_file)
        unlink(pgid_file);
}
/** call it before exiting; if show_status==1, mem status is displayed */
void destroy_memory(int show_status)
{
    /* clean-up */
    if (mem_lock)
        shm_unlock(); /* hack: force-unlock the shared memory lock in case
                         some process crashed and left it locked; this
                         allows an almost graceful shutdown */
#ifdef SHM_MEM
    if (show_status) {
        LOG(memlog, "Memory status (shm):\n");
        //shm_status();
#ifndef SER_MOD_INTERFACE
        shm_sums();
#endif
    }
    /* zero all shmem alloc vars that we still use */
#ifdef WHARF
#else
    shm_mem_destroy();
#endif
#endif
#ifdef PKG_MALLOC
    if (show_status) {
        LOG(memlog, "Memory status (pkg):\n");
        //pkg_status();
#ifndef SER_MOD_INTERFACE
        pkg_sums();
#endif
    }
#endif
}
void qm_shm_info(void* qmp, struct mem_info* info)
{
    shm_lock();
    qm_info(qmp, info);
    shm_unlock();
}
void qm_shm_free(void* qmp, void* p, const char* file, const char* func,
        unsigned int line)
{
    shm_lock();
    qm_free(qmp, p, file, func, line);
    shm_unlock();
}
bool SHMSrc::resize_area()
{
    while ((sizeof(SHMHeader) + shm_area_->buffer_size) > shm_area_len_) {
        size_t new_size = sizeof(SHMHeader) + shm_area_->buffer_size;

        shm_unlock();
        if (munmap(shm_area_, shm_area_len_)) {
            std::cerr << "Could not unmap shared area" << std::endl;
            perror(strerror(errno));
            return false;
        }

        shm_area_ = static_cast<SHMHeader*>(mmap(NULL, new_size,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd_, 0));
        shm_area_len_ = new_size;

        /* mmap() signals failure with MAP_FAILED, not NULL */
        if (shm_area_ == MAP_FAILED) {
            shm_area_ = 0;
            std::cerr << "Could not remap shared area" << std::endl;
            return false;
        }

        shm_lock();
    }
    return true;
}
bool SHMSink::resize_area(size_t desired_length)
{
    if (desired_length <= shm_area_len_)
        return true;

    shm_unlock();

    if (munmap(shm_area_, shm_area_len_)) {
        ERROR("Could not unmap shared area");
        strErr();
        return false;
    }

    if (ftruncate(fd_, desired_length)) {
        ERROR("Could not resize shared area");
        strErr();
        return false;
    }

    shm_area_ = static_cast<SHMHeader*>(mmap(NULL, desired_length,
                PROT_READ | PROT_WRITE, MAP_SHARED, fd_, 0));
    shm_area_len_ = desired_length;

    if (shm_area_ == MAP_FAILED) {
        shm_area_ = 0;
        ERROR("Could not remap shared area");
        return false;
    }

    shm_lock();
    return true;
}
unsigned long qm_shm_available(void* qmp)
{
    unsigned long r;

    shm_lock();
    r = qm_available(qmp);
    shm_unlock();

    return r;
}
void* qm_shm_realloc(void* qmp, void* p, unsigned long size)
{
    void *r;

    shm_lock();
    r = qm_realloc(qmp, p, size);
    shm_unlock();

    return r;
}
unsigned long shm_available_safe()
{
    unsigned long ret;

    shm_lock();
    ret = shm_available();
    shm_unlock();

    return ret;
}
void* qm_shm_realloc(void* qmp, void* p, unsigned long size,
        const char* file, const char* func, unsigned int line)
{
    void *r;

    shm_lock();
    r = qm_realloc(qmp, p, size, file, func, line);
    shm_unlock();

    return r;
}
void* qm_shm_resize(void* qmp, void* p, unsigned long size)
{
    void *r;

    shm_lock();
    if (p)
        qm_free(qmp, p);
    r = qm_malloc(qmp, size);
    shm_unlock();

    return r;
}
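The qm_shm_* functions above all wrap an "unsafe" qm_* primitive between shm_lock()/shm_unlock(). For completeness, a plain allocation wrapper would follow the same shape; the sketch below is illustrative only and assumes the qm_malloc(qmp, size) primitive used above (the wrapper name qm_shm_malloc is an assumption, not taken from these snippets):

/* illustrative sketch: locked allocation wrapper in the same style as the
 * qm_shm_* functions above; the name qm_shm_malloc is assumed */
void* qm_shm_malloc(void* qmp, unsigned long size)
{
    void *r;

    shm_lock();
    r = qm_malloc(qmp, size);
    shm_unlock();

    return r;
}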
inline static void* sh_realloc(void* p, unsigned int size)
{
    void *r;

    /* note: unlike realloc(), the old contents are not preserved --
     * the old block is freed and a fresh one is allocated */
    shm_lock();
    shm_free_unsafe(p);
    r = shm_malloc_unsafe(size);
    shm_unlock();

    return r;
}
static void do_shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
    if (shp->shm_nattch) {
        shp->shm_perm.mode |= SHM_DEST;
        /* Do not find it any more */
        shp->shm_perm.key = IPC_PRIVATE;
        shm_unlock(shp);
    } else
        shm_destroy(ns, shp);
}
static inline void shm_inc(int id)
{
    struct shmid_kernel *shp;

    if (!(shp = shm_lock(id)))
        BUG();
    shp->shm_atim = get_seconds();
    shp->shm_lprid = current->tgid;
    shp->shm_nattch++;
    shm_unlock(shp);
}
static inline void shm_inc(int id)
{
    struct shmid_kernel *shp;

    if (!(shp = shm_lock(id)))
        BUG();
    shp->shm_atim = CURRENT_TIME;
    shp->shm_lprid = current->pid;
    shp->shm_nattch++;
    shm_unlock(id);
}
void SHMSrc::render(char *dest, size_t len)
{
    shm_lock();

    while (buffer_gen_ == shm_area_->buffer_gen) {
        shm_unlock();
        std::cerr << "Waiting for next buffer" << std::endl;
        sem_wait(&shm_area_->notification);
        shm_lock();
    }

    if (!resize_area())
        return;

    std::cerr << "Reading from buffer!" << std::endl;
    memcpy(dest, shm_area_->data, len);
    buffer_gen_ = shm_area_->buffer_gen;
    shm_unlock();
}
/* append a newly received tag from a 200/INVITE to
 * the transaction's set (only safe if called from within
 * a REPLY_LOCK); it returns 1 if such a to-tag already
 * exists */
inline static int update_totag_set(struct cell *t, struct sip_msg *ok)
{
    struct totag_elem *i, *n;
    str *tag;
    char *s;

    if (!ok->to || !ok->to->parsed) {
        LM_ERR("to not parsed\n");
        return 0;
    }
    tag = &get_to(ok)->tag_value;
    if (!tag->s) {
        LM_DBG("no tag in to\n");
        return 0;
    }

    for (i = t->fwded_totags; i; i = i->next) {
        if (i->tag.len == tag->len
                && memcmp(i->tag.s, tag->s, tag->len) == 0) {
            /* to-tag already recorded */
#ifdef XL_DEBUG
            LM_CRIT("totag retransmission\n");
#else
            LM_DBG("totag retransmission\n");
#endif
            return 1;
        }
    }

    /* that's a new to-tag -- record it */
#ifndef HP_MALLOC
    shm_lock();
    n = (struct totag_elem*)shm_malloc_unsafe(sizeof(struct totag_elem));
    s = (char *)shm_malloc_unsafe(tag->len);
    shm_unlock();
#else
    n = (struct totag_elem*)shm_malloc(sizeof(struct totag_elem));
    s = (char *)shm_malloc(tag->len);
#endif
    if (!s || !n) {
        LM_ERR("no more shared memory\n");
        if (n) shm_free(n);
        if (s) shm_free(s);
        return 0;
    }
    memset(n, 0, sizeof(struct totag_elem));
    memcpy(s, tag->s, tag->len);
    n->tag.s = s;
    n->tag.len = tag->len;
    n->next = t->fwded_totags;
    t->fwded_totags = n;
    LM_DBG("new totag\n");
    return 0;
}
/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
    struct file *file = vma->vm_file;
    struct shm_file_data *sfd = shm_file_data(file);
    struct shmid_kernel *shp;

    shp = shm_lock(sfd->ns, sfd->id);
    BUG_ON(IS_ERR(shp));
    shp->shm_atim = get_seconds();
    shp->shm_lprid = task_tgid_vnr(current);
    shp->shm_nattch++;
    shm_unlock(shp);
}
void SHMSink::render(const std::vector<unsigned char> &data)
{
    shm_lock();

    if (!resize_area(sizeof(SHMHeader) + data.size()))
        return;

    memcpy(shm_area_->data, data.data(), data.size());
    shm_area_->buffer_size = data.size();
    shm_area_->buffer_gen++;
    sem_post(&shm_area_->notification);
    shm_unlock();
}
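SHMSink::render and SHMSrc::render are the producer and consumer halves of the same shared buffer: the sink copies a frame in, bumps buffer_gen and posts the semaphore; the source sleeps on the semaphore until buffer_gen changes, then copies the frame out. A minimal usage sketch, assuming sink and src objects already attached to the same segment (captureFrame() and frame_size are placeholders, not part of the API shown above):

// illustrative only: 'sink', 'src', captureFrame() and frame_size are assumed
std::vector<unsigned char> frame = captureFrame();  // producer process
sink.render(frame);                                 // copy into shm, bump buffer_gen, sem_post

std::vector<char> out(frame_size);                  // consumer process
src.render(out.data(), out.size());                 // wait for new buffer_gen, copy out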
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
    struct shmid_kernel *shp;

    shp = container_of(ipcp, struct shmid_kernel, shm_perm);

    if (shp->shm_nattch) {
        shp->shm_perm.mode |= SHM_DEST;
        shp->shm_perm.key = IPC_PRIVATE;
        shm_unlock(shp);
    } else
        shm_destroy(ns, shp);
}
/**
 * Clean up on exit. This should be called before exiting.
 * \param show_status set to one to display the mem status
 */
void cleanup(int show_status)
{
    LM_INFO("cleanup\n");

    /* clean-up */
    if (mem_lock)
        shm_unlock(); /* hack: force-unlock the shared memory lock in case
                         some process crashed and left it locked; this
                         allows an almost graceful shutdown */

    handle_ql_shutdown();
    destroy_modules();
#ifdef USE_TCP
    destroy_tcp();
#endif
#ifdef USE_TLS
    destroy_tls();
#endif
    destroy_timer();
    destroy_stats_collector();
    destroy_script_cb();
    pv_free_extra_list();
    destroy_argv_list();
    destroy_black_lists();

#ifdef CHANGEABLE_DEBUG_LEVEL
    if (debug != &debug_init) {
        reset_proc_debug_level();
        debug_init = *debug;
        shm_free(debug);
        debug = &debug_init;
    }
#endif
#ifdef PKG_MALLOC
    if (show_status) {
        LM_GEN1(memdump, "Memory status (pkg):\n");
        pkg_status();
    }
#endif
#ifdef SHM_MEM
    if (pt)
        shm_free(pt);
    pt = 0;
    if (show_status) {
        LM_GEN1(memdump, "Memory status (shm):\n");
        shm_status();
    }
    /* zero all shmem alloc vars that we still use */
    shm_mem_destroy();
#endif
    if (pid_file)
        unlink(pid_file);
    if (pgid_file)
        unlink(pgid_file);
}
void SHMSink::render_callback(VideoProvider &provider, size_t bytes)
{
    shm_lock();

    if (!resize_area(sizeof(SHMHeader) + bytes)) {
        ERROR("Could not resize area");
        return;
    }

    provider.fillBuffer(static_cast<void*>(shm_area_->data));
    shm_area_->buffer_size = bytes;
    shm_area_->buffer_gen++;
    sem_post(&shm_area_->notification);
    shm_unlock();
}
void free_cell(struct cell* dead_cell)
{
    char *b;
    int i;
    struct sip_msg *rpl;
    struct totag_elem *tt, *foo;

    release_cell_lock(dead_cell);
    shm_lock();

    /* UA Server */
    if (dead_cell->uas.request)
        sip_msg_free_unsafe(dead_cell->uas.request);
    if (dead_cell->uas.response.buffer)
        shm_free_unsafe(dead_cell->uas.response.buffer);

    /* completion callback */
    if (dead_cell->cbp)
        shm_free_unsafe(dead_cell->cbp);

    /* UA Clients */
    for (i = 0; i < dead_cell->nr_of_outgoings; i++) {
        /* retransmission buffer */
        if ((b = dead_cell->uac[i].request.buffer))
            shm_free_unsafe(b);
        b = dead_cell->uac[i].local_cancel.buffer;
        if (b != 0 && b != BUSY_BUFFER)
            shm_free_unsafe(b);
        rpl = dead_cell->uac[i].reply;
        if (rpl && rpl != FAKED_REPLY) {
            sip_msg_free_unsafe(rpl);
        }
    }

    /* collected to-tags */
    tt = dead_cell->fwded_totags;
    while (tt) {
        foo = tt->next;
        shm_free_unsafe(tt->tag.s);
        shm_free_unsafe(tt);
        tt = foo;
    }

    /* the cell's body */
    shm_free_unsafe(dead_cell);

    shm_unlock();
}
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
    struct file *shm_file;

    shm_file = shp->shm_file;
    shp->shm_file = NULL;
    ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
    shm_rmid(ns, shp);
    shm_unlock(shp);
    if (!is_file_hugepages(shm_file))
        shmem_lock(shm_file, 0, shp->mlock_user);
    else if (shp->mlock_user)
        user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
    fput(shm_file);
    ipc_rcu_putref(shp, shm_rcu_free);
}
void copy_buf_to_shm_log_buf(logf_t *_logf)
{
    if (!_logf || _logf->_inner_log_buf_len < 1) {
        return;
    }

    shm_lock(_logf->_shm_log_buf);

    /* flush the shared buffer to the log fd if the pending data would
     * overflow it */
    if (*(_logf->log_buf_len) + _logf->_inner_log_buf_len > _logf->log_buf_size) {
        write(_logf->LOG_FD, _logf->log_buf, *(_logf->log_buf_len));
        *(_logf->log_buf_len) = 0;
    }

    memcpy(_logf->log_buf + (*_logf->log_buf_len), _logf->_inner_log_buf,
            _logf->_inner_log_buf_len);
    *(_logf->log_buf_len) += _logf->_inner_log_buf_len;

    shm_unlock(_logf->_shm_log_buf);

    _logf->_inner_log_buf_len = 0;
}
/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
    struct file *file = vma->vm_file;
    struct shm_file_data *sfd = shm_file_data(file);
    struct shmid_kernel *shp;
    struct ipc_namespace *ns = sfd->ns;

    down_write(&shm_ids(ns).rwsem);
    /* remove from the list of attaches of the shm segment */
    shp = shm_lock(ns, sfd->id);
    BUG_ON(IS_ERR(shp));
    shp->shm_lprid = task_tgid_vnr(current);
    shp->shm_dtim = get_seconds();
    shp->shm_nattch--;
    if (shm_may_destroy(ns, shp))
        shm_destroy(ns, shp);
    else
        shm_unlock(shp);
    up_write(&shm_ids(ns).rwsem);
}
inline static void* sh_realloc(void* p, unsigned int size)
{
    void *r;

#ifndef HP_MALLOC
    shm_lock();
    shm_free_unsafe(p);
    r = shm_malloc_unsafe(size);
#else
    shm_free(p);
    r = shm_malloc(size);
#endif

    shm_threshold_check();

#ifndef HP_MALLOC
    shm_unlock();
#endif

    return r;
}
/**
 * @brief Wait until the written TID reaches or exceeds the given tid
 * @retval 1 success
 * @retval 0 failure
 * @retval -1 error
 */
int shm_cond_wait(ssm_header *shm_p, SSM_tid tid)
{
    int ret = 0;
    struct timeval now;
    struct timespec tout;

    if (tid <= shm_get_tid_top(shm_p))
        return 1;

    /* wait at most about one second */
    gettimeofday(&now, NULL);
    tout.tv_sec = now.tv_sec + 1;
    tout.tv_nsec = now.tv_usec * 1000;

    if (!shm_lock(shm_p))
        return -1;
    while (tid > shm_get_tid_top(shm_p) && (ret == 0)) {
        ret = pthread_cond_timedwait(&shm_p->cond, &shm_p->mutex, &tout);
    }
    if (!shm_unlock(shm_p))
        return -1;

    return (ret == 0);
}
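A reader would typically call shm_cond_wait() to block until the writer has published data up to a given TID before consuming it. The loop below is only a sketch, assuming the ssm_header pointer and shm_get_tid_top() helper shown above; read_frame() is a hypothetical accessor that is not part of these snippets:

/* illustrative sketch: wait for each successive TID and consume it;
 * read_frame() is assumed, not shown above */
SSM_tid next = shm_get_tid_top(shm_p) + 1;
for (;;) {
    int r = shm_cond_wait(shm_p, next);
    if (r < 0)
        break;                  /* lock error */
    if (r == 0)
        continue;               /* timed out, try again */
    read_frame(shm_p, next);    /* data up to 'next' is now available */
    next++;
}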
/* call it before exiting; if show_status==1, mem status is displayed */
void cleanup(int show_status)
{
    /* clean-up */
    if (mem_lock)
        shm_unlock(); /* hack: force-unlock the shared memory lock in case
                         some process crashed and left it locked; this
                         allows an almost graceful shutdown */

    destroy_modules();
#ifdef USE_TCP
    destroy_tcp();
#endif
#ifdef USE_TLS
    destroy_tls();
#endif
    destroy_timer();
    close_unixsock_server();
    destroy_fifo();
    destroy_script_cb();
#ifdef PKG_MALLOC
    if (show_status) {
        LOG(memlog, "Memory status (pkg):\n");
        pkg_status();
    }
#endif
#ifdef SHM_MEM
    if (pt)
        shm_free(pt);
    pt = 0;
    if (show_status) {
        LOG(memlog, "Memory status (shm):\n");
        shm_status();
    }
    /* zero all shmem alloc vars that we still use */
    shm_mem_destroy();
#endif
    if (pid_file)
        unlink(pid_file);
    if (pgid_file)
        unlink(pgid_file);
}