Example #1
static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_tables(regs->psw.addr);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup);
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}
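The fixup path above works because the kernel keeps a table, sorted by instruction address, that maps each faulting instruction to a recovery address. The following standalone sketch shows how such a lookup might work; the entry layout and the names ex_entry and lookup_fixup are hypothetical, and real kernels typically store sorted relative offsets rather than absolute pointers.

#include <stddef.h>

struct ex_entry {
	unsigned long insn;	/* address of the faulting instruction */
	unsigned long fixup;	/* address to resume at instead */
};

/* Binary search over a table sorted by insn address. */
static const struct ex_entry *lookup_fixup(const struct ex_entry *tbl,
					   size_t n, unsigned long addr)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (tbl[mid].insn == addr)
			return &tbl[mid];
		if (tbl[mid].insn < addr)
			lo = mid + 1;
		else
			hi = mid;
	}
	return NULL;	/* no fixup entry: the caller must oops, as above */
}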
Example #2
static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case VDSO_FAULT:
		asce = S390_lowcore.vdso_asce;
		pr_cont("vdso ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}
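For reference, the first switch above decodes the low two bits of the translation-exception identification (TEID) into an address-space name. A minimal table-driven equivalent, with the hypothetical helper teid_space_name(), could look like this:

static const char *teid_space_name(unsigned long teid)
{
	static const char * const names[] = {
		"primary space",	/* 0 */
		"access register",	/* 1 */
		"secondary space",	/* 2 */
		"home space",		/* 3 */
	};

	return names[teid & 3];
}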
Example #3
void parse_fault_entry_common(uvm_gpu_t *gpu, NvU32 *fault_entry, uvm_fault_buffer_entry_t *buffer_entry)
{
    NvU64 addr_hi, addr_lo;
    NvU64 timestamp_hi, timestamp_lo;
    bool replayable_fault_enabled;

    addr_hi = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, INST_HI);
    addr_lo = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, INST_LO);
    buffer_entry->instance_ptr.address = addr_lo + (addr_hi << HWSIZE_MW(C369, BUF_ENTRY, INST_LO));
    // HW value contains the 4K page number. Shift to build the full address
    buffer_entry->instance_ptr.address <<= 12;

    buffer_entry->instance_ptr.aperture = get_fault_inst_aperture(fault_entry);

    addr_hi = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, ADDR_HI);
    addr_lo = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, ADDR_LO);
    // HW value contains the 4K page number. Shift to build the full address
    buffer_entry->fault_address = (addr_lo + (addr_hi << HWSIZE_MW(C369, BUF_ENTRY, ADDR_LO))) << 12;
    buffer_entry->fault_address = uvm_address_get_canonical_form(buffer_entry->fault_address);

    timestamp_hi = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, TIMESTAMP_HI);
    timestamp_lo = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, TIMESTAMP_LO);
    buffer_entry->timestamp = timestamp_lo + (timestamp_hi << HWSIZE_MW(C369, BUF_ENTRY, TIMESTAMP_LO));

    buffer_entry->fault_type = get_fault_type(fault_entry);

    buffer_entry->fault_access_type = get_fault_access_type(fault_entry);

    buffer_entry->fault_source.client_type = get_fault_client_type(fault_entry);

    buffer_entry->fault_source.client_id = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, CLIENT);
    BUILD_BUG_ON(sizeof(buffer_entry->fault_source.client_id) * 8 < DRF_SIZE_MW(NVC369_BUF_ENTRY_CLIENT));

    buffer_entry->fault_source.gpc_id = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, GPC_ID);
    BUILD_BUG_ON(sizeof(buffer_entry->fault_source.gpc_id) * 8 < DRF_SIZE_MW(NVC369_BUF_ENTRY_GPC_ID));

    buffer_entry->is_replayable = (READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, REPLAYABLE_FAULT) ==
                                   NVC369_BUF_ENTRY_REPLAYABLE_FAULT_TRUE);

    // Compute global uTLB id
    if (buffer_entry->fault_source.client_type == UVM_FAULT_CLIENT_TYPE_GPC) {
        NvU16 gpc_utlb_id = get_utlb_id_gpc(buffer_entry->fault_source.client_id);
        NvU32 utlb_id;
        UVM_ASSERT(gpc_utlb_id < uvm_volta_get_utlbs_per_gpc(gpu));

        utlb_id = buffer_entry->fault_source.gpc_id * uvm_volta_get_utlbs_per_gpc(gpu) + gpc_utlb_id;
        UVM_ASSERT(utlb_id < gpu->fault_buffer_info.replayable.utlb_count);

        buffer_entry->fault_source.utlb_id = utlb_id;
    }
    else if (buffer_entry->fault_source.client_type == UVM_FAULT_CLIENT_TYPE_HUB) {
        buffer_entry->fault_source.utlb_id = 0;
    }

    buffer_entry->fault_source.mmu_engine_id = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, ENGINE_ID);
    BUILD_BUG_ON(sizeof(buffer_entry->fault_source.mmu_engine_id) * 8 < DRF_SIZE_MW(NVC369_BUF_ENTRY_ENGINE_ID));

    buffer_entry->fault_source.mmu_engine_type =
        gpu->arch_hal->mmu_engine_id_to_type(buffer_entry->fault_source.mmu_engine_id);

    buffer_entry->fault_source.ve_id = uvm_volta_get_ve_id(buffer_entry->fault_source.mmu_engine_id,
                                                           buffer_entry->fault_source.mmu_engine_type);
    BUILD_BUG_ON(1 << (sizeof(buffer_entry->fault_source.ve_id) * 8) < MAX_SUBCONTEXTS);

    buffer_entry->is_virtual = is_fault_address_virtual(fault_entry);

    buffer_entry->in_protected_mode = (READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, PROTECTED_MODE) ==
                                       NVC369_BUF_ENTRY_PROTECTED_MODE_TRUE);

    replayable_fault_enabled = (READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, REPLAYABLE_FAULT_EN) ==
                                NVC369_BUF_ENTRY_REPLAYABLE_FAULT_EN_TRUE);
    UVM_ASSERT_MSG(replayable_fault_enabled, "Fault with REPLAYABLE_FAULT_EN bit unset\n");
}
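Several fields above follow the same pattern: a wide value is split into HI/LO fields, recombined as lo + (hi << width_of_lo), and page numbers are shifted by 12 to form byte addresses. A standalone sketch of that pattern, with LO_BITS standing in for the real HWSIZE_MW() width:

#include <stdint.h>

// Assumed width of the *_LO field; the real value comes from HWSIZE_MW().
#define LO_BITS 32

static inline uint64_t combine_hi_lo(uint64_t hi, uint64_t lo)
{
    return lo + (hi << LO_BITS);
}

static inline uint64_t page_number_to_address(uint64_t pfn_4k)
{
    // HW reports the 4K page number, so shift by 12 to get a byte address
    return pfn_4k << 12;
}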
Example #4
/* translates data structures to soap/xml. recursive */
xml_element* SOAP_to_xml_element_worker(XMLRPC_REQUEST request, XMLRPC_VALUE node) {
#define BUF_SIZE 128
	xml_element* elem_val = NULL;
	if (node) {
		int bFreeNode = 0;  /* sometimes we may need to free 'node' variable */
		char buf[BUF_SIZE];
		XMLRPC_VALUE_TYPE_EASY type = XMLRPC_GetValueTypeEasy(node);
		char* pName = NULL, *pAttrType = NULL;

		/* create our return value element */
		elem_val = xml_elem_new();

		switch (type) {
		case xmlrpc_type_struct:
		case xmlrpc_type_mixed:
		case xmlrpc_type_array:
			if (type == xmlrpc_type_array) {
				/* arrays are _very_ special in soap.
				   TODO: Should handle sparse/partial arrays here. */

				/* determine soap array type. */
				const char* type = get_array_soap_type(node);
				xml_element_attr* attr_array_type = NULL;

				/* specify array kids type and array size. */
				snprintf(buf, sizeof(buf), "%s[%i]", type, XMLRPC_VectorSize(node));
				attr_array_type = new_attr(TOKEN_ARRAY_TYPE, buf);

				Q_PushTail(&elem_val->attrs, attr_array_type);

				pAttrType = TOKEN_ARRAY;
			}
			/* check for fault, which is a rather special case. 
			   (can't these people design anything consistent/simple/elegant?) */
			else if (type == xmlrpc_type_struct) {
				int fault_type = get_fault_type(node);
				if (fault_type) {
					if (fault_type == 1) {
						/* generate fault from xmlrpc-style fault codes.
						   note that we get a new node, which must be freed herein. */
						node = gen_fault_xmlrpc(node, elem_val);
						bFreeNode = 1;
					}
					pName = TOKEN_FAULT;
				}
			}

			{
				/* recurse through sub-elements */
				XMLRPC_VALUE xIter = XMLRPC_VectorRewind(node);
				while ( xIter ) {
					xml_element* next_el = SOAP_to_xml_element_worker(request, xIter);
					if (next_el) {
						Q_PushTail(&elem_val->children, next_el);
					}
					xIter = XMLRPC_VectorNext(node);
				}
			}

			break;

			/* handle scalar types */
		case xmlrpc_type_empty:
			pAttrType = TOKEN_NULL;
			break;
		case xmlrpc_type_string:
			pAttrType = TOKEN_STRING;
			simplestring_addn(&elem_val->text, XMLRPC_GetValueString(node), XMLRPC_GetValueStringLen(node));
			break;
		case xmlrpc_type_int:
			pAttrType = TOKEN_INT;
			snprintf(buf, BUF_SIZE, "%i", XMLRPC_GetValueInt(node));
			simplestring_add(&elem_val->text, buf);
			break;
		case xmlrpc_type_boolean:
			pAttrType = TOKEN_BOOLEAN;
			snprintf(buf, BUF_SIZE, "%i", XMLRPC_GetValueBoolean(node));
			simplestring_add(&elem_val->text, buf);
			break;
		case xmlrpc_type_double:
			pAttrType = TOKEN_DOUBLE;
			snprintf(buf, BUF_SIZE, "%f", XMLRPC_GetValueDouble(node));
			simplestring_add(&elem_val->text, buf);
			break;
		case xmlrpc_type_datetime:
			{
				time_t tt = XMLRPC_GetValueDateTime(node);
				/* the trailing 'Z' in the format designates UTC, so use gmtime, not localtime */
				struct tm *tm = gmtime (&tt);
				pAttrType = TOKEN_DATETIME;
				if(tm && strftime (buf, BUF_SIZE, "%Y-%m-%dT%H:%M:%SZ", tm)) {
					simplestring_add(&elem_val->text, buf);
				}
			}
			break;
		case xmlrpc_type_base64:
			{
				struct buffer_st buf;
				pAttrType = TOKEN_BASE64;
				base64_encode_xmlrpc(&buf, XMLRPC_GetValueBase64(node), XMLRPC_GetValueStringLen(node));
				simplestring_addn(&elem_val->text, buf.data, buf.offset );
				buffer_delete(&buf);
			}
			break;
		default:
			break;
		}

		/* determining element's name is a bit tricky, due to soap semantics. */
		if (!pName) {
			/* if the value's type is known... */
			if (pAttrType) {
				/* see if it has an id (key). If so, use that as name, and type as an attribute. */
				pName = (char*)XMLRPC_GetValueID(node);
				if (pName) {
					Q_PushTail(&elem_val->attrs, new_attr(TOKEN_TYPE, pAttrType));
				}

				/* otherwise, use the type as the name. */
				else {
					pName = pAttrType;
				}
			}
			/* if the value's type is not known... (a rare case?) */
			else {
				/* see if it has an id (key). otherwise, default to generic "item" */
				pName = (char*)XMLRPC_GetValueID(node);
				if (!pName) {
					pName = "item";
				}
			}
		}
		elem_val->name = strdup(pName);

		/* cleanup */
		if (bFreeNode) {
			XMLRPC_CleanupValue(node);
		}
	}
	return elem_val;
}
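The name-selection logic at the end of the function boils down to a three-way preference. A condensed sketch, where pick_name() is a hypothetical helper and not part of xmlrpc-epi:

static const char *pick_name(const char *id, const char *type_token)
{
	if (id) {
		return id;          /* key wins; the type becomes an attribute */
	}
	if (type_token) {
		return type_token;  /* otherwise the type token doubles as the name */
	}
	return "item";              /* last-resort generic name */
}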
Example #5
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline int do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	int fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (notify_page_fault(regs))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case VDSO_FAULT:
		fault = VM_FAULT_BADMAP;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	/* No reason to continue if interrupted by SIGKILL. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		fault = VM_FAULT_SIGNAL;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_sem has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
				   FAULT_FLAG_RETRY_NOWAIT);
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				       address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}
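The retry handling above follows a common pattern: allow one retry, then clear the allow-retry flag so the fault cannot loop forever. A schematic version, with stand-in flag values and a hypothetical handle_fault_once() callback:

enum {
	ALLOW_RETRY	= 1,	/* stand-in for FAULT_FLAG_ALLOW_RETRY */
	TRIED		= 2,	/* stand-in for FAULT_FLAG_TRIED */
	RETRY		= 4,	/* stand-in for VM_FAULT_RETRY */
};

static int fault_with_retry(int (*handle_fault_once)(unsigned int flags))
{
	unsigned int flags = ALLOW_RETRY;
	int fault;

retry:
	fault = handle_fault_once(flags);
	if ((fault & RETRY) && (flags & ALLOW_RETRY)) {
		/* retry at most once to avoid starvation, as in do_exception() */
		flags &= ~ALLOW_RETRY;
		flags |= TRIED;
		goto retry;
	}
	return fault;
}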