/* Remove the watch with the given id and disable its arbiter interrupt. */
int crisv32_arbiter_unwatch(int id)
{
	reg_marb_rw_intr_mask intr_mask = REG_RD(marb, regi_marb, rw_intr_mask);

	crisv32_arbiter_init();

	spin_lock(&arbiter_lock);

	if ((id < 0) || (id >= NUMBER_OF_BP) || (!watches[id].used)) {
		spin_unlock(&arbiter_lock);
		return -EINVAL;
	}

	memset(&watches[id], 0, sizeof(struct crisv32_watch_entry));

	if (id == 0)
		intr_mask.bp0 = regk_marb_no;
	else if (id == 1)
		intr_mask.bp1 = regk_marb_no;
	else if (id == 2)
		intr_mask.bp2 = regk_marb_no;
	else if (id == 3)
		intr_mask.bp3 = regk_marb_no;

	REG_WR(marb, regi_marb, rw_intr_mask, intr_mask);

	spin_unlock(&arbiter_lock);
	return 0;
}
/*
 * Set up a bus-arbiter breakpoint watching accesses to [start, start + size]
 * by the given clients and access types, with cb invoked when it triggers.
 * Returns the watch id, or a negative errno if the address does not look
 * physical or no breakpoint slot is free.
 */
int crisv32_arbiter_watch(unsigned long start, unsigned long size,
			  unsigned long clients, unsigned long accesses,
			  watch_callback *cb)
{
	int i;

	crisv32_arbiter_init();

	if (start > 0x80000000) {
		printk(KERN_ERR "Arbiter: %lX doesn't look like a "
			"physical address", start);
		return -EFAULT;
	}

	spin_lock(&arbiter_lock);

	for (i = 0; i < NUMBER_OF_BP; i++) {
		if (!watches[i].used) {
			reg_marb_rw_intr_mask intr_mask =
				REG_RD(marb, regi_marb, rw_intr_mask);

			watches[i].used = 1;
			watches[i].start = start;
			watches[i].end = start + size;
			watches[i].cb = cb;

			REG_WR_INT(marb_bp, watches[i].instance, rw_first_addr,
				   watches[i].start);
			REG_WR_INT(marb_bp, watches[i].instance, rw_last_addr,
				   watches[i].end);
			REG_WR_INT(marb_bp, watches[i].instance, rw_op,
				   accesses);
			REG_WR_INT(marb_bp, watches[i].instance, rw_clients,
				   clients);

			if (i == 0)
				intr_mask.bp0 = regk_marb_yes;
			else if (i == 1)
				intr_mask.bp1 = regk_marb_yes;
			else if (i == 2)
				intr_mask.bp2 = regk_marb_yes;
			else if (i == 3)
				intr_mask.bp3 = regk_marb_yes;

			REG_WR(marb, regi_marb, rw_intr_mask, intr_mask);
			spin_unlock(&arbiter_lock);
			return i;
		}
	}
	spin_unlock(&arbiter_lock);
	return -ENOMEM;
}
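/*
 * Hypothetical usage sketch, not part of the driver: set up a watch on a
 * buffer and tear it down again. The physical address, size and the
 * client/access masks below are illustrative assumptions (the real mask
 * encodings come from arbiter.h), and the callback is assumed to be the
 * plain void(void) watch_callback type declared there.
 */
static void example_watch_cb(void)
{
	printk(KERN_INFO "arbiter: watched region was accessed\n");
}

static int example_setup_watch(void)
{
	int id;

	/* Watch 4 KiB at an assumed physical address, for all clients and
	 * all access types (0x3fff and 0xff are placeholder masks). */
	id = crisv32_arbiter_watch(0x40000000, 0x1000, 0x3fff, 0xff,
				   example_watch_cb);
	if (id < 0)
		return id;

	/* ... later, when the region no longer needs watching ... */
	return crisv32_arbiter_unwatch(id);
}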
int crisv32_arbiter_allocate_bandwidth(int client, int region,
				       unsigned long bandwidth)
{
	int i;
	int total_assigned = 0;
	int total_clients = 0;
	int req;

	crisv32_arbiter_init();

	for (i = 0; i < NBR_OF_CLIENTS; i++) {
		total_assigned += requested_slots[region][i];
		total_clients += active_clients[region][i];
	}

	/* Avoid division by 0 for 0-bandwidth requests. */
	req = bandwidth == 0
		? 0 : NBR_OF_SLOTS / (max_bandwidth[region] / bandwidth);

	/*
	 * We make sure that there are enough slots only for non-zero
	 * requests. Requesting 0 bandwidth *may* allocate slots,
	 * though if all bandwidth is allocated, such a client won't
	 * get any and will have to rely on getting memory access
	 * according to the fixed scheme that's the default when one
	 * of the slot-allocated clients doesn't claim their slot.
	 */
	if (total_assigned + req > NBR_OF_SLOTS)
		return -ENOMEM;

	active_clients[region][client] = 1;
	requested_slots[region][client] = req;
	crisv32_arbiter_config(region, NBR_OF_SLOTS - total_assigned);
	return 0;
}
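/*
 * Worked example of the slot arithmetic above; all numbers are illustrative
 * assumptions, not values taken from this file. With NBR_OF_SLOTS == 64 and
 * max_bandwidth[region] == 100 (in whatever unit callers use), a request for
 * 25 units gives req = 64 / (100 / 25) = 16 slots, while a 0-bandwidth
 * request yields req == 0 and can never fail the capacity check. A
 * hypothetical caller reserving bandwidth for client 0 in region 0 might
 * look like this:
 */
static int example_reserve_bandwidth(void)
{
	/* Client 0, region 0, bandwidth 25 in the same (assumed) unit as
	 * max_bandwidth[]; returns -ENOMEM if too few slots are left. */
	return crisv32_arbiter_allocate_bandwidth(0, 0, 25);
}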