Example #1
File: report.c Project: HarryWei/linux
static void print_address_description(struct kasan_access_info *info)
{
	const void *addr = info->access_addr;

	if ((addr >= (void *)PAGE_OFFSET) &&
		(addr < high_memory)) {
		struct page *page = virt_to_head_page(addr);

		if (PageSlab(page)) {
			void *object;
			struct kmem_cache *cache = page->slab_cache;
			object = nearest_obj(cache, page,
						(void *)info->access_addr);
			kasan_object_err(cache, object);
			return;
		}
		dump_page(page, "kasan: bad access detected");
	}

	if (kernel_or_module_addr(addr)) {
		if (!init_task_stack_addr(addr))
			pr_err("Address belongs to variable %pS\n", addr);
	}
	dump_stack();
}
Example #2
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	unsigned long rounded_up_size;

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}
Example #3
File: bgmac.c Project: 545191228/linux
static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;
	void *buf;

	/* Allocate an rx buffer; an skb is built around it later */
	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
	if (!buf)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = buf + BGMAC_RX_BUF_OFFSET;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map skb for the DMA */
	dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		bgmac_err(bgmac, "DMA mapping error\n");
		put_page(virt_to_head_page(buf));
		return -ENOMEM;
	}

	/* Update the slot */
	slot->buf = buf;
	slot->dma_addr = dma_addr;

	return 0;
}
Example #4
File: report.c Project: Lyude/linux
static struct page *addr_to_page(const void *addr)
{
	if ((addr >= (void *)PAGE_OFFSET) &&
			(addr < high_memory))
		return virt_to_head_page(addr);
	return NULL;
}
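A minimal caller sketch (hypothetical, not taken from any of the projects above) showing the intended use of this helper: the NULL return lets a reporting path skip addresses that lie outside the kernel's linear mapping, where no struct page exists.

/* Hypothetical caller: only linear-mapping addresses have a struct page. */
static void describe_address(const void *addr)
{
	struct page *page = addr_to_page(addr);

	if (!page)
		return;	/* e.g. vmalloc/module space: nothing to dump */

	if (PageSlab(page))
		pr_err("Address belongs to cache %s\n",
		       page->slab_cache->name);
	else
		dump_page(page, "kasan: bad access detected");
}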
Example #5
File: report.c Project: DenisLug/mptcp
static void print_address_description(struct kasan_access_info *info)
{
	const void *addr = info->access_addr;

	if ((addr >= (void *)PAGE_OFFSET) &&
		(addr < high_memory)) {
		struct page *page = virt_to_head_page(addr);

		if (PageSlab(page)) {
			void *object;
			struct kmem_cache *cache = page->slab_cache;
			void *last_object;

			object = virt_to_obj(cache, page_address(page), addr);
			last_object = page_address(page) +
				page->objects * cache->size;

			if (unlikely(object > last_object))
				object = last_object; /* we hit into padding */

			object_err(cache, page, object,
				"kasan: bad access detected");
			return;
		}
		dump_page(page, "kasan: bad access detected");
	}

	if (kernel_or_module_addr(addr)) {
		if (!init_task_stack_addr(addr))
			pr_err("Address belongs to variable %pS\n", addr);
	}

	dump_stack();
}
Example #6
File: kasan.c Project: mkrufky/linux
void kasan_poison_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_poison_slab_free(page->slab_cache, ptr);
}
Example #7
struct kmem_cache *get_kmem_cache_by_addr(void *addr)
{
	/* First locate the struct page backing this address */
	struct page *page = virt_to_head_page(addr);

	if (!page)
		return NULL;

	/* For slab pages, page->slab_cache points to the owning kmem_cache */
	if (PageSlab(page))
		return page->slab_cache;

	return NULL;
}
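A hedged usage sketch (the caller below is hypothetical): the helper pairs naturally with a diagnostic print of the cache name.

/* Hypothetical caller: log which kmem_cache, if any, owns a pointer. */
static void report_owning_cache(void *ptr)
{
	struct kmem_cache *cache = get_kmem_cache_by_addr(ptr);

	if (cache)
		pr_info("%p comes from cache %s\n", ptr, cache->name);
	else
		pr_info("%p is not a slab object\n", ptr);
}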
Example #8
File: kasan.c Project: mkrufky/linux
void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}
Example #9
static void free_resource(struct resource *res)
{
	if (!res)
		return;

	if (!PageSlab(virt_to_head_page(res))) {
		spin_lock(&bootmem_resource_lock);
		res->sibling = bootmem_resource_free;
		bootmem_resource_free = res;
		spin_unlock(&bootmem_resource_lock);
	} else {
		kfree(res);
	}
}
Example #10
void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}
Example #11
File: dma.c Project: Brainiarc7/mt76
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		    int len, bool more)
{
	struct page *page = virt_to_head_page(data);
	int offset = data - page_address(page);
	struct sk_buff *skb = q->rx_head;

	offset += q->buf_offset;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			q->buf_size);

	if (more)
		return;

	q->rx_head = NULL;
	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}
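The first two lines of mt76_add_fragment() are a common page-frag idiom: virt_to_head_page() recovers the head page backing a fragment, and subtracting page_address() yields the offset that skb_add_rx_frag() expects. A minimal sketch of just that idiom (illustrative helper, not part of mt76):

/* Illustrative helper: map a frag pointer back to (page, offset). */
static void frag_to_page_offset(void *data, struct page **pagep,
				unsigned int *offsetp)
{
	struct page *page = virt_to_head_page(data);

	*pagep = page;
	*offsetp = data - page_address(page);
}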
Example #12
File: bgmac.c Project: 545191228/linux
static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
		slot = &ring->slots[i];
		if (!slot->dma_addr)
			continue;

		dma_unmap_single(dma_dev, slot->dma_addr,
				 BGMAC_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(slot->buf));
		slot->dma_addr = 0;
	}
}
Example #13
File: nommu.c Project: rochecr/linux
/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}
Example #14
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
		 int nsgs, int len, int sglen)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = buf->urb;
	int i;

	spin_lock_bh(&q->rx_page_lock);
	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, sglen, offset);
	}
	spin_unlock_bh(&q->rx_page_lock);

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	buf->len = urb->num_sgs * sglen;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}
Example #15
static int objhash_add_one(struct my_obj *obj)
{
	u32 hash_idx;

	if (obj == NULL) {
		pr_err("%s(): Failed, NULL object\n", __func__);
		return 0;
	}

	objhash_cnt++;
	INIT_HLIST_NODE(&obj->node);
	obj->page = virt_to_head_page(obj);

	/* Hash on the page address of the object */
	hash_idx = jhash(&obj->page, sizeof(obj->page), 13);
	//pr_info("DEBUG: hash_idx=0x%x [%u] page=0x%p\n",
	//	hash_idx, hash_idx % HASHSZ, obj->page);
	hash_idx = hash_idx % HASHSZ;

	hlist_add_head(&obj->node, &objhash[hash_idx]);

	return 1;
}
Example #16
File: bgmac.c Project: 545191228/linux
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		do {
			/* Prepare new skb as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
					  ring->start);
				put_page(virt_to_head_page(buf));
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n",
					  ring->start);
				put_page(virt_to_head_page(buf));
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				bgmac_err(bgmac, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				break;
			}
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}
Example #17
static struct kmem_cache *qlink_to_cache(void **qlink)
{
	return virt_to_head_page(qlink)->slab_cache;
}
Example #18
void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}
Example #19
File: ss_skb.c Project: nekulin/tempesta
/**
 * The kernel may allocate a bit more memory for an SKB than what was
 * requested (see ksize() call in __alloc_skb()). Use the extra memory
 * if it's enough to hold @n bytes. Otherwise, allocate new linear data.
 *
 * @return 0 on success, -errno on failure.
 * @return SKB in @it->skb if new SKB is allocated.
 * @return pointer to the room for new data in @it->ptr if making room.
 * @return pointer to data right after the deleted fragment in @it->ptr.
 */
static int
__split_linear_data(struct sk_buff *skb, char *pspt, int len, TfwStr *it)
{
	int alloc = len > 0;
	int tail_len = (char *)skb_tail_pointer(skb) - pspt;
	struct page *page = virt_to_head_page(skb->head);

	SS_DBG("[%d]: %s: skb [%p] pspt [%p] len [%d] tail_len [%d]\n",
		smp_processor_id(), __func__, skb, pspt, len, tail_len);
	BUG_ON(!skb->head_frag);
	BUG_ON(tail_len <= 0);
	BUG_ON(!(alloc | tail_len));
	BUG_ON(-len > tail_len);

	/*
	 * Quick and unlikely path: just advance the skb tail pointer.
	 * Note that this only works when we make room. When we remove,
	 * pspt points at the start of the data chunk to remove. In that
	 * case, tail_len can never be zero.
	 */
	if (unlikely(!tail_len && len <= ss_skb_tailroom(skb))) {
		BUG_ON(len < 0);
		it->ptr = ss_skb_put(skb, len);
		return 0;
	}
	/*
	 * Quick and unlikely path: just move skb tail pointer backward.
	 * Note that this only works when we remove data, and the data
	 * is located exactly at the end of the linear part of an skb.
	 */
	if (unlikely((len < 0) && (tail_len == -len))) {
		ss_skb_put(skb, len);
		if (skb_is_nonlinear(skb))
			it->ptr = skb_frag_address(&skb_shinfo(skb)->frags[0]);
		return 0;
	}

	/*
	 * Data is inserted or deleted in the middle of the linear part,
	 * or there's insufficient room in the linear part of an SKB to
	 * insert @len bytes.
	 *
	 * Don't bother with skb tail room: if the linear part is large,
	 * then it's likely that we'll do some smaller data insertions
	 * later and go by the quick path above. Otherwise, the tail size
	 * is also small.
	 *
	 * The inserted data is placed in a fragment. The tail part is
	 * moved to yet another fragment. The linear part is trimmed to
	 * exclude the deleted data and the tail part.
	 *
	 * Do all allocations before moving the fragments to avoid complex
	 * rollback.
	 */
	if (alloc) {
		if (__new_pgfrag(skb, len, 0, alloc + !!tail_len, it))
			return -EFAULT;
	} else {
		if (__extend_pgfrags(skb, 0, 1, it))
			return -EFAULT;
		tail_len += len;	/* @len is negative. */
	}

	if (tail_len) {
		int tail_off = pspt - (char *)page_address(page);

		/*
		 * Trim the linear part by |@len| bytes if data
		 * is deleted. Then trim it further to exclude
		 * the tail data. Finally, set up the fragment
		 * allotted above with the tail data.
		 */
		if (len < 0) {
			tail_off -= len;
			skb->tail += len;
			skb->len += len;
		}
		skb->tail -= tail_len;
		skb->data_len += tail_len;
		skb->truesize += tail_len;

		__skb_fill_page_desc(skb, alloc, page, tail_off, tail_len);
		skb_frag_ref(skb, alloc);	/* get_page(page); */
	}

	it->ptr = skb_frag_address(&skb_shinfo(skb)->frags[0]);
	return 0;
}
Example #20
static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_head_page(qlink)->slab_cache;
}