static int _stp_register_ctl_channel_fs(void) { #ifdef STP_BULKMODE int i; int j; char buf[32]; struct proc_dir_entry *bs = NULL; #endif struct proc_dir_entry *de; if (!_stp_mkdir_proc_module()) goto err0; #ifdef STP_BULKMODE /* now for each cpu "n", create /proc/systemtap/module_name/n */ stp_for_each_cpu(i) { sprintf(buf, "%d", i); de = create_proc_entry(buf, 0600, _stp_proc_root); if (de == NULL) goto err1; de->uid = _stp_uid; de->gid = _stp_gid; de->proc_fops = &_stp_proc_fops; de->data = _stp_kmalloc(sizeof(int)); if (de->data == NULL) { remove_proc_entry(buf, _stp_proc_root); goto err1; } *(int *)de->data = i; } bs = create_proc_read_entry("bufsize", 0, _stp_proc_root, _stp_ctl_read_bufsize, NULL); #endif /* STP_BULKMODE */ /* create /proc/systemtap/module_name/.cmd */ de = create_proc_entry(".cmd", 0600, _stp_proc_root); if (de == NULL) goto err1; de->uid = _stp_uid; de->gid = _stp_gid; de->proc_fops = &_stp_ctl_fops_cmd; return 0; err1: #ifdef STP_BULKMODE for (de = _stp_proc_root->subdir; de; de = de->next) _stp_kfree(de->data); stp_for_each_cpu(j) { if (j == i) break; sprintf(buf, "%d", j); remove_proc_entry(buf, _stp_proc_root); } if (bs) remove_proc_entry("bufsize", _stp_proc_root); #endif /* STP_BULKMODE */ _stp_rmdir_proc_module(); err0: return -1; }
/*
 * Insert the address range [min_addr, max_addr] into the global
 * "blackmap", keeping it as a single contiguous sorted array of
 * entries protected by addr_map_lock.
 *
 * Returns:
 *   0        - range inserted (blackmap now points at a new array;
 *              the old one is freed).
 *   1        - min_addr and/or max_addr already fall inside existing
 *              entries; the matching entries are reported through
 *              *existing_min / *existing_max (when non-NULL) and
 *              nothing is inserted.
 *   -ENOMEM  - allocation failed.
 *
 * NOTE(review): overlap detection tests only the two endpoints via
 * lookup_addr_aux; presumably an existing entry lying strictly inside
 * (min_addr, max_addr) would not be detected — confirm against
 * lookup_addr_aux's semantics.
 */
static int add_bad_addr_entry(unsigned long min_addr, unsigned long max_addr,
			      struct addr_map_entry** existing_min,
			      struct addr_map_entry** existing_max)
{
	struct addr_map* new_map = 0;
	struct addr_map* old_map = 0;
	struct addr_map_entry* min_entry = 0;
	struct addr_map_entry* max_entry = 0;
	struct addr_map_entry* new_entry = 0;
	size_t existing = 0;
	unsigned long flags;

	/* Loop allocating memory for a new entry in the map.
	   The allocation must happen outside the lock (the allocator may
	   sleep — TODO confirm _stp_kmalloc's context requirements), so we
	   allocate, re-take the lock, and retry if the map grew meanwhile.
	   On loop exit (break) the write lock is HELD. */
	while (1) {
		size_t old_size = 0;
		stp_write_lock_irqsave(&addr_map_lock, flags);
		old_map = blackmap;
		if (old_map)
			old_size = old_map->size;
		/* Either this is the first time through the loop, or we
		   allocated a map previous time, but someone has come in and
		   added an entry while we were sleeping. */
		if (!new_map || (new_map && new_map->size < old_size + 1)) {
			/* Too small (or not yet allocated): drop the lock,
			   reallocate at the current size + 1, and retry. */
			stp_write_unlock_irqrestore(&addr_map_lock, flags);
			if (new_map) {
				_stp_kfree(new_map);
				new_map = 0;
			}
			new_map = _stp_kmalloc(sizeof(*new_map)
					       + sizeof(*new_entry) * (old_size + 1));
			if (!new_map)
				return -ENOMEM;
			new_map->size = old_size + 1;
		}
		else
			break;
	}

	/* Lock is held from here until the unlock below. */
	if (!blackmap) {
		/* First entry ever: it goes at index 0. */
		existing = 0;
	} else {
		/* Reject the insert if either endpoint is already covered. */
		min_entry = lookup_addr_aux(min_addr, 1, blackmap);
		max_entry = lookup_addr_aux(max_addr, 1, blackmap);
		if (min_entry || max_entry) {
			if (existing_min)
				*existing_min = min_entry;
			if (existing_max)
				*existing_max = max_entry;
			stp_write_unlock_irqrestore(&addr_map_lock, flags);
			_stp_kfree(new_map);
			return 1;
		}
		/* Index at which the new entry keeps the array sorted. */
		existing = upper_bound(min_addr, old_map);
	}

	/* Write the new entry, then copy the old entries around it:
	   [0, existing) before it, [existing, old size) after it. */
	new_entry = &new_map->entries[existing];
	new_entry->min = min_addr;
	new_entry->max = max_addr;
	if (old_map) {
		memcpy(&new_map->entries, old_map->entries,
		       existing * sizeof(*new_entry));
		if (old_map->size > existing)
			memcpy(new_entry + 1, &old_map->entries[existing],
			       (old_map->size - existing) * sizeof(*new_entry));
	}

	/* Publish the new array, then free the old one outside the lock. */
	blackmap = new_map;
	stp_write_unlock_irqrestore(&addr_map_lock, flags);
	if (old_map)
		_stp_kfree(old_map);
	return 0;
}