/*
 * Unlink a thread's lock-checker state from the global erts_locked_locks
 * list and free it.  Both the required and locked lists must already be
 * empty (asserted below); the caller owns l_lcks.
 */
static void destroy_locked_locks(erts_lc_locked_locks_t *l_lcks)
{
    ASSERT(l_lcks->thread_name);
    ASSERT(l_lcks->required.first == NULL);
    ASSERT(l_lcks->required.last == NULL);
    ASSERT(l_lcks->locked.first == NULL);
    ASSERT(l_lcks->locked.last == NULL);

    lc_lock();
    if (l_lcks->prev)
        l_lcks->prev->next = l_lcks->next;
    else {
        ASSERT(erts_locked_locks == l_lcks);
        erts_locked_locks = l_lcks->next;
    }
    if (l_lcks->next)
        l_lcks->next->prev = l_lcks->prev;
    lc_unlock();

    /* Free thread_name only after the node has been unlinked under the
     * lock; the original freed it while the node was still reachable via
     * erts_locked_locks, giving other threads that walk the list a
     * use-after-free window on thread_name. */
    free((void *) l_lcks->thread_name);
    free((void *) l_lcks);
}
static erts_lc_locked_locks_t * create_locked_locks(char *thread_name) { erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t)); if (!l_lcks) ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!"); l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown"); if (!l_lcks->thread_name) ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!"); l_lcks->emu_thread = 0; l_lcks->tid = erts_thr_self(); l_lcks->required.first = NULL; l_lcks->required.last = NULL; l_lcks->locked.first = NULL; l_lcks->locked.last = NULL; l_lcks->prev = NULL; lc_lock(); l_lcks->next = erts_locked_locks; if (erts_locked_locks) erts_locked_locks->prev = l_lcks; erts_locked_locks = l_lcks; lc_unlock(); erts_tsd_set(locks_key, (void *) l_lcks); return l_lcks; }
/*
 * Allocate and register per-thread lock-checker state (variant that
 * aborts via lc_abort() on out-of-memory instead of reporting an
 * internal error).  The node is linked at the head of the global
 * erts_locked_locks list and stored in thread-specific data.
 */
static erts_lc_locked_locks_t *
create_locked_locks(char *thread_name)
{
    erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
    if (!l_lcks)
        lc_abort();
    /* Private copy of the name; "unknown" when the caller passes NULL. */
    l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
    if (!l_lcks->thread_name)
        lc_abort();
    l_lcks->emu_thread = 0;
    l_lcks->tid = erts_thr_self();
    l_lcks->required.first = NULL;
    l_lcks->required.last = NULL;
    l_lcks->locked.first = NULL;
    l_lcks->locked.last = NULL;
    l_lcks->prev = NULL;
    /* Push onto the global doubly linked list under the checker lock. */
    lc_lock();
    l_lcks->next = erts_locked_locks;
    if (erts_locked_locks)
        erts_locked_locks->prev = l_lcks;
    erts_locked_locks = l_lcks;
    lc_unlock();
    erts_tsd_set(locks_key, (void *) l_lcks);
    return l_lcks;
}
/*
 * Refill the free-block pool when it is empty.  Called from lc_alloc()
 * with the checker lock held: the lock is dropped around malloc()
 * (presumably to avoid holding the checker lock across a potentially
 * slow allocation — TODO confirm) and re-taken before touching
 * free_blocks again.
 *
 * A chunk of ERTS_LC_FB_CHUNK_SIZE blocks is allocated; blocks
 * 1..CHUNK-2 are chained together, the last block is chained onto the
 * current free list, and block 0 is returned directly to the caller.
 */
static void *lc_core_alloc(void)
{
    int i;
    erts_lc_free_block_t *fbs;
    lc_unlock();
    fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t)
					  * ERTS_LC_FB_CHUNK_SIZE);
    if (!fbs) {
	erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
	lc_abort();
    }
    /* Chain blocks 1..CHUNK-2; block 0 is handed out below. */
    for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
	/* Poison fresh blocks so use of uninitialized memory is caught. */
	memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t));
#endif
	fbs[i].next = &fbs[i+1];
    }
#ifdef DEBUG
    memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
	   0xdf, sizeof(erts_lc_free_block_t));
#endif
    /* Re-acquire the lock before splicing into the shared free list;
     * free_blocks may have changed while we were unlocked. */
    lc_lock();
    fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = free_blocks;
    free_blocks = &fbs[1];
    return (void *) &fbs[0];
}
/* Return a block to the shared free list (LIFO push under the checker
 * lock).  In debug builds the block is poisoned first so stale reads of
 * freed memory are caught. */
static ERTS_INLINE void lc_free(void *p)
{
    erts_lc_free_block_t *block = (erts_lc_free_block_t *) p;
#ifdef DEBUG
    memset((void *) p, 0xdf, sizeof(erts_lc_free_block_t));
#endif
    lc_lock();
    block->next = free_blocks;
    free_blocks = block;
    lc_unlock();
}
/* Pop a block from the shared free list, refilling the pool via
 * lc_core_alloc() when it is empty.  The checker lock guards the list;
 * lc_core_alloc() is entered with the lock held. */
static ERTS_INLINE void *lc_alloc(void)
{
    void *block;
    lc_lock();
    if (free_blocks) {
        block = (void *) free_blocks;
        free_blocks = free_blocks->next;
    }
    else {
        block = lc_core_alloc();
    }
    lc_unlock();
    return block;
}
/**
 * drbd_al_apply_to_bm() - Sets the bitmap to dirty(1) where covered by active AL extents
 * @mdev:	DRBD device.
 */
void drbd_al_apply_to_bm(struct drbd_conf *mdev)
{
	unsigned int enr;
	unsigned long add = 0;
	char ppb[10];
	int i;

	/* Block until we get exclusive access to the activity log. */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	/* Mark every bitmap area covered by an in-use AL extent as
	 * out-of-sync; 'add' accumulates how many bits were newly set. */
	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
		if (enr == LC_FREE)
			continue;
		add += drbd_bm_ALe_set_all(mdev, enr);
	}

	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	dev_info(DEV, "Marked additional %s as out-of-sync based on AL.\n",
	     ppsize(ppb, Bit2KB(add)));
}
/*
 * Out-of-memory path of the block allocator: called with the checker
 * lock held, releases it, reports the condition and aborts.
 * lc_abort() presumably does not return, hence no return statement
 * despite the void * signature — TODO confirm it is marked noreturn.
 */
static void *lc_core_alloc(void)
{
    lc_unlock();
    erts_fprintf(stderr, "Lock checker out of memory!\n");
    lc_abort();
}
/**
 * drbd_al_to_on_disk_bm() - Writes bitmap parts covered by active AL extents
 * @mdev:	DRBD device.
 *
 * Called when we detach (unconfigure) local storage,
 * or when we go from R_PRIMARY to R_SECONDARY role.
 */
void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
{
	int i, nr_elements;
	unsigned int enr;
	struct bio **bios;
	struct drbd_atodb_wait wc;

	ERR_IF (!get_ldev_if_state(mdev, D_ATTACHING))
		return; /* sorry, I don't have any act_log etc... */

	/* Take exclusive access to the activity log. */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	nr_elements = mdev->act_log->nr_elements;

	/* GFP_KERNEL, we are not in anyone's write-out path */
	bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL);
	if (!bios)
		goto submit_one_by_one;

	/* wc counts the in-flight bios; io_done fires when all complete. */
	atomic_set(&wc.count, 0);
	init_completion(&wc.io_done);
	wc.mdev = mdev;
	wc.error = 0;

	/* Prepare one bio per bitmap sector covered by an active extent. */
	for (i = 0; i < nr_elements; i++) {
		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
		if (enr == LC_FREE)
			continue;
		/* next statement also does atomic_inc wc.count and local_cnt */
		if (atodb_prepare_unless_covered(mdev, bios,
						 enr/AL_EXT_PER_BM_SECT,
						 &wc))
			goto free_bios_submit_one_by_one;
	}

	/* unnecessary optimization? */
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	/* all prepared, submit them */
	for (i = 0; i < nr_elements; i++) {
		if (bios[i] == NULL)
			break;
		if (FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR)) {
			/* Fault injection: fail the bio without submitting. */
			bios[i]->bi_rw = WRITE;
			bio_endio(bios[i], -EIO);
		} else {
			submit_bio(WRITE, bios[i]);
		}
	}

	drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));

	/* always (try to) flush bitmap to stable storage */
	drbd_md_flush(mdev);

	/* In case we did not submit a single IO do not wait for
	 * them to complete. ( Because we would wait forever here. )
	 *
	 * In case we had IOs and they are already complete, there
	 * is not point in waiting anyways.
	 * Therefore this if () ... */
	if (atomic_read(&wc.count))
		wait_for_completion(&wc.io_done);

	put_ldev(mdev);

	kfree(bios);
	return;

 free_bios_submit_one_by_one:
	/* free everything by calling the endio callback directly.
	 */
	for (i = 0; i < nr_elements && bios[i]; i++)
		bio_endio(bios[i], 0);

	kfree(bios);

 submit_one_by_one:
	/* Fallback path when we could not prepare the batched bios:
	 * write each affected bitmap sector synchronously. */
	dev_warn(DEV, "Using the slow drbd_al_to_on_disk_bm()\n");

	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
		if (enr == LC_FREE)
			continue;
		/* Really slow: if we have al-extents 16..19 active,
		 * sector 4 will be written four times! Synchronous! */
		drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT);
	}

	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	put_ldev(mdev);
}
/*
 * Out-of-memory path of the block allocator: called with the checker
 * lock held, releases it and reports a fatal internal error.
 * ERTS_INTERNAL_ERROR presumably aborts and does not return, hence no
 * return statement despite the void * signature — TODO confirm.
 */
static void *lc_core_alloc(void)
{
    lc_unlock();
    ERTS_INTERNAL_ERROR("Lock checker out of memory!\n");
}