static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
			   unsigned int utimeout, uint32_t pipe_id,
			   uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	int timeout = utimeout;

	acquire_queue(kgd, pipe_id, queue_id);

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);

	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		/* Wait until the queue goes inactive; breaking while the
		 * ACTIVE bit is still set would return before preemption
		 * completed (compare the VI variant further down).
		 */
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (timeout <= 0) {
			pr_err("kfd: cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		msleep(20);
		timeout -= 20;
	}

	release_queue(kgd);
	return 0;
}
static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS (54+4)
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
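/* For context: a hedged usage sketch (not from the source) showing how a
 * caller might consume kgd_hqd_dump()'s output. Each entry pairs a byte
 * offset (the register index shifted left by 2, per DUMP_REG above) with
 * the value read; the caller owns the kmalloc_array() buffer and must
 * kfree() it. The helper name and pr_info sink are placeholders.
 */
static void print_hqd_regs(struct kgd_dev *kgd, uint32_t pipe_id,
			   uint32_t queue_id)
{
	uint32_t (*dump)[2];
	uint32_t n_regs, i;

	if (kgd_hqd_dump(kgd, pipe_id, queue_id, &dump, &n_regs))
		return;	/* -ENOMEM or other failure: nothing to free */

	for (i = 0; i < n_regs; i++)
		pr_info("hqd reg 0x%08x = 0x%08x\n", dump[i][0], dump[i][1]);

	kfree(dump);
}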
void
edict_unlink(edict_t *edict)
{
	int ret;
	poolresult_message_t message;

	ret = pthread_mutex_lock(&edict->reference.mx);
	assert(0 == ret);
	assert(edict->reference.count > 0);

	if (--edict->reference.count == 0) {
		/* last reference */
		if (edict->resultmq > 0)
			while (release_queue(edict->resultmq) < 0) {
				/* queue wasn't empty */
				logstr(GLOG_INSANE, "queue not empty, flushing");
				ret = get_msg_timed(edict->resultmq, &message,
						    sizeof(message.result), -1);
				if (ret > 0) {
					assert(message.result);
					free((chkresult_t *)message.result);
					message.result = NULL;
				}
			}
		pthread_mutex_unlock(&edict->reference.mx);
		Free(edict);
	} else {
		pthread_mutex_unlock(&edict->reference.mx);
	}
}
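/* For context: a hedged sketch (not from the source) of the acquire side
 * that edict_unlink() implies; the name edict_get() is an assumption. All
 * it must do is bump reference.count under the same mutex.
 */
edict_t *
edict_get(edict_t *edict)
{
	int ret;

	ret = pthread_mutex_lock(&edict->reference.mx);
	assert(0 == ret);
	edict->reference.count++;	/* paired with edict_unlink() */
	pthread_mutex_unlock(&edict->reference.mx);
	return edict;
}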
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_ptr may take the mm->mmap_sem.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}
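/* For context: a hedged sketch of a read_user_wptr() helper like the one
 * used above, modeled on the in-tree amdgpu_amdkfd macro; exact names and
 * details vary by kernel version and are assumptions here. It reads the
 * user-space write pointer directly when running in the owning process,
 * or temporarily adopts that process's mm from a kernel thread.
 */
#define read_user_wptr(mmptr, wptr, dst)				\
	({								\
		bool valid = false;					\
		if ((mmptr) && (wptr)) {				\
			if ((mmptr) == current->mm) {			\
				/* same process: plain user access */	\
				valid = !get_user((dst), (wptr));	\
			} else if (current->mm == NULL) {		\
				/* kernel thread: borrow the mm */	\
				use_mm(mmptr);				\
				valid = !get_user((dst), (wptr));	\
				unuse_mm(mmptr);			\
			}						\
		}							\
		valid;							\
	})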
static void
close_harbor(struct harbor *h, int id)
{
	struct slave *s = &h->s[id];
	s->status = STATUS_DOWN;
	if (s->fd) {
		skynet_socket_close(h->ctx, s->fd);
	}
	if (s->queue) {
		release_queue(s->queue);
		s->queue = NULL;
	}
}
static void
hash_delete(struct hashmap *hash)
{
	int i;
	for (i = 0; i < HASH_SIZE; i++) {
		struct keyvalue *node = hash->node[i];
		while (node) {
			struct keyvalue *next = node->next;
			release_queue(node->queue);
			skynet_free(node);
			node = next;
		}
	}
	skynet_free(hash);
}
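/* For context: a hedged sketch of the shapes hash_delete() implies, showing
 * only the fields it touches. The real skynet structures carry key data as
 * well, and both HASH_SIZE and the queue type name are assumptions.
 */
#define HASH_SIZE 4096

struct keyvalue {
	struct keyvalue *next;		/* singly linked bucket chain */
	struct harbor_msg_queue *queue;	/* owned; freed via release_queue() */
};

struct hashmap {
	struct keyvalue *node[HASH_SIZE];	/* one chain per bucket */
};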
static void release_sa_master(struct ix_sa_master *master)
{
	struct npe_crypt_cont *cont;
	unsigned long flags;

	write_lock_irqsave(&master->lock, flags);
	while (master->pool) {
		cont = master->pool;
		master->pool = cont->next;
		dma_pool_free(master->dmapool, cont, cont->phys);
		master->pool_size--;
	}
	write_unlock_irqrestore(&master->lock, flags);

	if (master->pool_size) {
		printk(KERN_ERR "ixp4xx_crypto: %d items lost from DMA pool\n",
				master->pool_size);
	}

	dma_pool_destroy(master->dmapool);
	release_queue(master->sendq);
	release_queue(master->recvq);
	return_npe_dev(master->npe_dev);
}
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
		    high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}
static void port_inc_deinit(struct module *m)
{
	struct port_inc_priv *priv = get_priv(m);

	release_queue(priv->port, PACKET_DIR_INC, 0 /* XXX */, m);
}
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
			   enum kfd_preempt_type reset_type,
			   unsigned int utimeout, uint32_t pipe_id,
			   uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;
	struct vi_mqd *m = get_mqd(mqd);

	if (adev->in_gpu_reset)
		return -EIO;

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0 */
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			 mec, pipe, queue_id);
		value = RREG32(mmRLC_CP_SCHEDULERS);
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(mmRLC_CP_SCHEDULERS, value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe since EOP RPTR==WPTR for any inactive HQD
	 * on ASICs that do not support context-save.
	 * EOP writes/reads can start anywhere in the ring.
	 */
	if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
	}

	for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_ptr may take the mm->mmap_sem.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr)
{
	struct vi_mqd *m;
	uint32_t shadow_wptr, valid_wptr;
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	m = get_mqd(mqd);

	/* copy_from_user() returns the number of bytes that could NOT be
	 * copied, so 0 means the user wptr was read successfully.
	 */
	valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr));
	acquire_queue(kgd, pipe_id, queue_id);

	WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
	WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
	WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);

	WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
	WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
	WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
	WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
	WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
	WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
	WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
			m->cp_hqd_pq_rptr_report_addr_hi);

	if (valid_wptr == 0)
		WREG32(mmCP_HQD_PQ_WPTR, shadow_wptr);

	WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);

	WREG32(mmCP_HQD_EOP_BASE_ADDR, m->cp_hqd_eop_base_addr_lo);
	WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, m->cp_hqd_eop_base_addr_hi);
	WREG32(mmCP_HQD_EOP_CONTROL, m->cp_hqd_eop_control);
	WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
	WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
	WREG32(mmCP_HQD_EOP_EVENTS, m->cp_hqd_eop_done_events);

	WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO, m->cp_hqd_ctx_save_base_addr_lo);
	WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI, m->cp_hqd_ctx_save_base_addr_hi);
	WREG32(mmCP_HQD_CTX_SAVE_CONTROL, m->cp_hqd_ctx_save_control);
	WREG32(mmCP_HQD_CNTL_STACK_OFFSET, m->cp_hqd_cntl_stack_offset);
	WREG32(mmCP_HQD_CNTL_STACK_SIZE, m->cp_hqd_cntl_stack_size);
	WREG32(mmCP_HQD_WG_STATE_OFFSET, m->cp_hqd_wg_state_offset);
	WREG32(mmCP_HQD_CTX_SAVE_SIZE, m->cp_hqd_ctx_save_size);

	WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, m->cp_hqd_dequeue_request);
	WREG32(mmCP_HQD_ERROR, m->cp_hqd_error);
	WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
	WREG32(mmCP_HQD_EOP_DONES, m->cp_hqd_eop_dones);

	WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);

	release_queue(kgd);

	return 0;
}
static int init_sa_master(struct ix_sa_master *master)
{
	struct npe_info *npe;
	int ret = -ENODEV;

	if (!(ix_fuse() & (IX_FUSE_HASH | IX_FUSE_AES | IX_FUSE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	memset(master, 0, sizeof(struct ix_sa_master));
	master->npe_dev = get_npe_by_id(NPE_ID);
	if (!master->npe_dev)
		goto err;

	npe = dev_get_drvdata(master->npe_dev);

	if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
		switch (npe->img_info[1]) {
		case 4:
			printk(KERN_INFO "Crypto AES available\n");
			break;
		case 5:
			printk(KERN_INFO "Crypto AES and CCM available\n");
			break;
		default:
			printk(KERN_WARNING "Current microcode for %s has no"
				" crypto capabilities\n", npe->plat->name);
			break;
		}
	}
	rwlock_init(&master->lock);

	master->dmapool = dma_pool_create("ixp4xx_crypto", master->npe_dev,
			sizeof(struct npe_crypt_cont), 32, 0);
	if (!master->dmapool) {
		ret = -ENOMEM;
		goto err;
	}
	master->sendq = request_queue(SEND_QID, QUEUE_SIZE);
	if (IS_ERR(master->sendq)) {
		printk(KERN_ERR "ixp4xx_crypto: Error requesting Q: %d\n",
				SEND_QID);
		ret = PTR_ERR(master->sendq);
		goto err;
	}
	master->recvq = request_queue(RECV_QID, QUEUE_SIZE);
	if (IS_ERR(master->recvq)) {
		printk(KERN_ERR "ixp4xx_crypto: Error requesting Q: %d\n",
				RECV_QID);
		ret = PTR_ERR(master->recvq);
		release_queue(master->sendq);
		goto err;
	}

	master->recvq->irq_cb = irqcb_recv;
	queue_set_watermarks(master->recvq, 0, 0);
	queue_set_irq_src(master->recvq, Q_IRQ_ID_NOT_E);
	queue_enable_irq(master->recvq);
	printk(KERN_INFO "ixp4xx_crypto " MY_VERSION " registered successfully\n");

	return 0;
err:
	if (master->dmapool)
		dma_pool_destroy(master->dmapool);
	/* Drop the NPE device reference only if we actually acquired one. */
	if (master->npe_dev)
		put_device(master->npe_dev);
	return ret;
}
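/* For context: a hedged sketch (not from the source) of how
 * init_sa_master() and release_sa_master() would pair up in a module's
 * init/exit path; the static instance and hook names are assumptions.
 */
static struct ix_sa_master sa_master;

static int __init ixp4xx_crypto_init(void)
{
	return init_sa_master(&sa_master);
}

static void __exit ixp4xx_crypto_exit(void)
{
	release_sa_master(&sa_master);
}

module_init(ixp4xx_crypto_init);
module_exit(ixp4xx_crypto_exit);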