Example #1
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedExchangeAdd64 (atomic, val);
#else
  return InterlockedExchangeAdd (atomic, val);
#endif
}
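InterlockedExchangeAdd and InterlockedExchangeAdd64 return the value the target held before the addition, which is what makes this wrapper a fetch-and-add. A minimal standalone sketch of that return semantics (Windows only, hypothetical values):

#include <windows.h>
#include <cassert>

int main()
{
    volatile LONG64 counter = 40;
    LONG64 old = InterlockedExchangeAdd64(&counter, 2);
    assert(old == 40 && counter == 42); // the pre-add value is returned
    return 0;
}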
Example #2
__host__ __device__
typename enable_if<
  sizeof(Integer64) == 8,
  Integer64
>::type
atomic_fetch_sub(Integer64 *x, Integer64 y)
{
#if defined(__CUDA_ARCH__)
  // CUDA has no 64-bit atomicSub overload; emulate it by atomically adding
  // the negated value (atomicAdd returns the pre-op value, as fetch_sub must).
  return (Integer64)atomicAdd((unsigned long long *)x, (unsigned long long)(-y));
#elif defined(__GNUC__)
  // Note: clang also defines __GNUC__, so it takes this branch.
  return __atomic_fetch_sub(x, y, __ATOMIC_SEQ_CST);
#elif defined(_MSC_VER)
  return InterlockedExchangeAdd64((volatile LONG64 *)x, -y);
#elif defined(__clang__)
  return __c11_atomic_fetch_sub(x, y, __ATOMIC_SEQ_CST);
#else
#error "No atomic_fetch_sub implementation."
#endif
}
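For comparison, a std::atomic sketch of the same fetch-subtract that hides the per-compiler dispatch; this is an illustration, not part of the original example:

#include <atomic>
#include <cstdint>

static int64_t fetch_sub64(std::atomic<int64_t> &x, int64_t y)
{
    // fetch_sub returns the pre-subtraction value, matching every branch above.
    return x.fetch_sub(y, std::memory_order_seq_cst);
}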
Example #3
VOID KphUnlockHandleTableEntry(
    __in PHANDLE_TABLE HandleTable,
    __in PHANDLE_TABLE_ENTRY HandleTableEntry
    )
{
    PEX_PUSH_LOCK handleContentionEvent;

    PAGED_CODE();

    // Set the unlocked bit.

#ifdef _M_X64
    InterlockedExchangeAdd64(&HandleTableEntry->Value, 1);
#else
    InterlockedExchangeAdd(&HandleTableEntry->Value, 1);
#endif

    // Allow waiters to wake up.

    handleContentionEvent = (PEX_PUSH_LOCK)((ULONG_PTR)HandleTable + KphDynHtHandleContentionEvent);

    if (*(PULONG_PTR)handleContentionEvent != 0)
        ExfUnblockPushLock(handleContentionEvent, NULL);
}
Example #4
int64_t ExchAdd64(volatile int64_t *i, int64_t a)
{
  // Returns the value *i held before the addition (fetch-and-add).
  return (int64_t)InterlockedExchangeAdd64((volatile LONG64 *)i, a);
}
//---------------------------------------------------------------------------
int64_t interlockedIncrement(volatile int64_t &v, int64_t a)
{
  // NB: despite the name, this returns the value held before the addition,
  // unlike InterlockedIncrement64, which returns the incremented value.
  return InterlockedExchangeAdd64((LONGLONG *)&v, a);
}
Example #6
static void segment_allocate_add(mempool p,  uint64 count)
{

    // Required Memory:
    //  sz( segment )
    //  count * sz( real_node_size )
    //
    //  where real node size is:
    //      ALIGN_TO_16( sz( node ) ) + p->elem_size
    //  so the node's usable address is nodebase + ALIGN_TO_16(sz(node))
    //
    size_t total_sz;
    struct pool_segment *seg = NULL;
    struct node *nodeList = NULL;
    struct node *node = NULL;
    char *ptr = NULL;
    uint64 i;

    total_sz = ALIGN_TO_16(sizeof(struct pool_segment))
               + ((size_t)count * (sizeof(struct node) + (size_t)p->elem_size)) ;

#ifdef MEMPOOL_DEBUG
    ShowDebug(read_message("Source.common.mempool_debug"), p->name, count, (float)total_sz/1024.f/1024.f);
#endif

    // allocate! (spin forever until we've got the memory.)
    i=0;
    while(1) {
        ptr = (char *)aMalloc(total_sz);
        if(ptr != NULL) break;

        i++; // increase failcount.
        if(!(i & 7)) {
            ShowWarning(read_message("Source.common.mempool_debug2"), (float)total_sz/1024.f/1024.f,  i);
#ifdef WIN32
            Sleep(1000);
#else
            sleep(1);
#endif
        } else {
            rathread_yield(); /// allow/force a voluntary context switch
        }
    }//endwhile: allocation spinloop.

    // Clear Memory.
    memset(ptr, 0x00, total_sz);

    // Initialize segment struct.
    seg = (struct pool_segment *)ptr;
    ptr += ALIGN_TO_16(sizeof(struct pool_segment));

    seg->pool = p;
    seg->num_nodes_total = count;
    seg->num_bytes = total_sz;


    // Initialize nodes!
    nodeList = NULL;
    for(i = 0; i < count; i++) {
        node = (struct node *)ptr;
        ptr += sizeof(struct node);
        ptr += p->elem_size;

        node->segment = seg;
#ifdef MEMPOOLASSERT
        node->used = false;
        node->magic = NODE_MAGIC;
#endif

        if(p->onalloc != NULL)  p->onalloc(NODE_TO_DATA(node));

        node->next = nodeList;
        nodeList = node;
    }



    // Link in Segment.
    EnterSpinLock(&p->segmentLock);
    seg->next = p->segments;
    p->segments = seg;
    LeaveSpinLock(&p->segmentLock);

    // Link in Nodes
    EnterSpinLock(&p->nodeLock);
    nodeList->next = p->free_list;
    p->free_list = nodeList;
    LeaveSpinLock(&p->nodeLock);


    // Increase Stats:
    InterlockedExchangeAdd64(&p->num_nodes_total,  count);
    InterlockedExchangeAdd64(&p->num_nodes_free,   count);
    InterlockedIncrement64(&p->num_segments);
    InterlockedExchangeAdd64(&p->num_bytes_total,   total_sz);

}//end: segment_allocate_add()
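The allocator above leans on two helpers it does not show, ALIGN_TO_16 and NODE_TO_DATA. A hypothetical reconstruction that matches the pointer arithmetic in the loop (the real project's definitions may differ):

// Hypothetical helpers, not from the original source.
#define ALIGN_TO_16(x)  ((((size_t)(x)) + 15) & ~(size_t)15)
// The usable payload sits immediately after the node header.
#define NODE_TO_DATA(n) ((void *)((char *)(n) + sizeof(struct node)))

Example #7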
NTSTATUS
AIMWrFltrWrite(IN PDEVICE_OBJECT DeviceObject, IN PIRP Irp)
{
    PDEVICE_EXTENSION device_extension = (PDEVICE_EXTENSION)DeviceObject->DeviceExtension;

    PIO_STACK_LOCATION io_stack = IoGetCurrentIrpStackLocation(Irp);

    NTSTATUS status;

    if (!device_extension->Statistics.IsProtected)
    {
        return AIMWrFltrSendToNextDriver(DeviceObject, Irp);
    }

    if (!device_extension->Statistics.Initialized)
    {
        status = AIMWrFltrInitializeDiffDevice(device_extension);

        if (!NT_SUCCESS(status))
        {
            status = STATUS_MEDIA_WRITE_PROTECTED;

            Irp->IoStatus.Information = 0;
            Irp->IoStatus.Status = status;
            IoCompleteRequest(Irp, IO_NO_INCREMENT);
            return status;
        }
    }

    InterlockedIncrement64(&device_extension->Statistics.WriteRequests);

    if (io_stack->Parameters.Write.Length == 0)
    {
        // Turn a zero-byte write request into a read request so that the
        // target device driver still performs its bounds checks etc.

        IoGetNextIrpStackLocation(Irp)->MajorFunction = IRP_MJ_READ;

        return AIMWrFltrSendToNextDriver(DeviceObject, Irp);
    }

    InterlockedExchangeAdd64(&device_extension->Statistics.WrittenBytes,
        io_stack->Parameters.Write.Length);

    LONGLONG highest_byte =
        io_stack->Parameters.Write.ByteOffset.QuadPart +
        io_stack->Parameters.Write.Length;

    if ((io_stack->Parameters.Write.ByteOffset.QuadPart >=
        device_extension->Statistics.DiffDeviceVbr.Fields.Head.Size.QuadPart) ||
        (highest_byte <= 0) ||
        (highest_byte > device_extension->Statistics.DiffDeviceVbr.Fields.Head.Size.QuadPart))
    {
        Irp->IoStatus.Status = STATUS_END_OF_MEDIA;
        IoCompleteRequest(Irp, IO_NO_INCREMENT);

        KdBreakPoint();

        return STATUS_END_OF_MEDIA;
    }

    if (io_stack->Parameters.Write.Length >
        device_extension->Statistics.LargestWriteSize)
    {
        device_extension->Statistics.LargestWriteSize =
            io_stack->Parameters.Write.Length;

        KdPrint(("AIMWrFltrWrite: Largest write size is now %u KB\n",
            device_extension->Statistics.LargestWriteSize >> 10));
    }
Example #8
DECLSPEC_NOINLINE
PKAFFINITY
KiIpiSendRequest (
    IN KAFFINITY TargetSet,
    IN ULONG64 Parameter,
    IN ULONG64 Count,
    IN ULONG64 RequestType
    )

/*++

Routine Description:

    This routine executes the specified immediate request on the specified
    set of processors.

    N.B. This function MUST be called from a non-context switchable state.

Arguments:

   TargetSet - Supplies the set of processors on which the specified
       operation is to be executed.

   Parameter - Supplies the parameter data that will be packed into the
       request summary.

   Count - Supplies the count data that will be packed into the request summary.

   RequestType - Supplies the type of immediate request.

Return Value:

    The address of the appropriate request barrier is returned as the function
    value.

--*/

{

#if !defined(NT_UP)

    PKAFFINITY Barrier;
    PKPRCB Destination;
    ULONG Number;
    KAFFINITY PacketTargetSet;
    PKPRCB Prcb;
    ULONG Processor;
    PREQUEST_MAILBOX RequestMailbox;
    KAFFINITY SetMember;
    PVOID *Start;
    KAFFINITY SummarySet;
    KAFFINITY TargetMember;
    REQUEST_SUMMARY Template;
    PVOID *Virtual;

    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    //
    // Initialize request template.
    //

    Prcb = KeGetCurrentPrcb();
    Template.Summary = 0;
    Template.IpiRequest = RequestType;
    Template.Count = Count;
    Template.Parameter = Parameter;

    //
    // If the target set contains one and only one processor, then use the
    // target set for signal done synchronization. Otherwise, use packet
    // barrier for signal done synchronization.
    //

    Prcb->TargetSet = TargetSet;
    if ((TargetSet & (TargetSet - 1)) == 0) {
        Template.IpiSynchType = TRUE;
        Barrier = (PKAFFINITY)&Prcb->TargetSet;

    } else {
        Prcb->PacketBarrier = 1;
        Barrier = (PKAFFINITY)&Prcb->PacketBarrier;
    }

    //
    // Loop through the target set of processors and set the request summary.
    // If a target processor is already processing a request, then remove
    // that processor from the target set of processor that will be sent an
    // interprocessor interrupt.
    //
    // N.B. It is guaranteed that there is at least one bit set in the target
    //      set.
    //

    Number = Prcb->Number;
    SetMember = Prcb->SetMember;
    SummarySet = TargetSet;
    PacketTargetSet = TargetSet;
    BitScanForward64(&Processor, SummarySet);
    do {
        Destination = KiProcessorBlock[Processor];
        PrefetchForWrite(&Destination->SenderSummary);
        RequestMailbox = &Destination->RequestMailbox[Number];
        PrefetchForWrite(RequestMailbox);
        TargetMember = AFFINITY_MASK(Processor);

        //
        // Make sure that processing of the last IPI is complete before sending
        // another IPI to the same processor.
        // 

        while ((Destination->SenderSummary & SetMember) != 0) {
            KeYieldProcessor();
        }

        //
        // If the request type is flush multiple and the flush entries will
        // fit in the mailbox, then copy the virtual address array to the
        // destination mailbox and change the request type to flush immediate.
        //
        // If the request type is packet ready, then copy the packet to the
        // destination mailbox.
        //

        if (RequestType == IPI_FLUSH_MULTIPLE) {
            Virtual = &RequestMailbox->Virtual[0];
            Start = (PVOID *)Parameter;
            switch (Count) {

                //
                // Copy of up to seven virtual addresses and a conversion of
                // the request type to flush multiple immediate.
                //

            case 7:
                Virtual[6] = Start[6];
            case 6:
                Virtual[5] = Start[5];
            case 5:
                Virtual[4] = Start[4];
            case 4:
                Virtual[3] = Start[3];
            case 3:
                Virtual[2] = Start[2];
            case 2:
                Virtual[1] = Start[1];
            case 1:
                Virtual[0] = Start[0];
                Template.IpiRequest = IPI_FLUSH_MULTIPLE_IMMEDIATE;
                break;
            }

        } else if (RequestType == IPI_PACKET_READY) {
            RequestMailbox->RequestPacket = *(PKREQUEST_PACKET)Parameter;
        }

        RequestMailbox->RequestSummary = Template.Summary;
        if (InterlockedExchangeAdd64((LONG64 volatile *)&Destination->SenderSummary,
                                     SetMember) != 0) {

            TargetSet ^= TargetMember;
        }

        SummarySet ^= TargetMember;
    } while (BitScanForward64(&Processor, SummarySet) != FALSE);

    //
    // Request interprocessor interrupts on the remaining target set of
    // processors.
    //
    //
    // N.B. For packet sends, there exists a potential deadlock situation
    //      unless an IPI is sent to the original set of target processors.
    //      The deadlock arises from the fact that the targets will spin in
    //      their IPI routines.
    //

    if (RequestType == IPI_PACKET_READY) {
        TargetSet = PacketTargetSet;
    }

    if (TargetSet != 0) {
        HalRequestIpi(TargetSet);
    }

    return Barrier;

#else

    UNREFERENCED_PARAMETER(TargetSet);
    UNREFERENCED_PARAMETER(Parameter);
    UNREFERENCED_PARAMETER(Count);
    UNREFERENCED_PARAMETER(RequestType);

    return NULL;

#endif

}
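The (TargetSet & (TargetSet - 1)) == 0 test near the top of the routine is the classic single-bit check: clearing the lowest set bit leaves zero exactly when at most one bit, i.e. at most one target processor, is set. A standalone sketch:

#include <cassert>
#include <cstdint>

static bool at_most_one_bit(uint64_t x)
{
    // x & (x - 1) clears the lowest set bit of x.
    return (x & (x - 1)) == 0;
}

int main()
{
    assert(at_most_one_bit(0x08));  // one target processor
    assert(!at_most_one_bit(0x0C)); // two target processors
    return 0;
}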
Example #9
DECLSPEC_NOINLINE
VOID
KiIpiProcessRequests (
    VOID
    )

/*++

Routine Description:

    This routine processes all pending interprocessor requests directed at
    the current processor.

Arguments:

    None.

Return Value:

    None.

--*/

{

#if !defined(NT_UP)

    PVOID *End;
    ULONG64 Number;
    PKPRCB Packet;
    PKPRCB Prcb;
    ULONG Processor;
    REQUEST_SUMMARY Request;
    PREQUEST_MAILBOX RequestMailbox;
    PKREQUEST_PACKET RequestPacket;
    LONG64 SetMember;
    PKPRCB Source;
    KAFFINITY SummarySet;
    KAFFINITY TargetSet;
    PVOID *Virtual;

    //
    // Loop until the sender summary is zero.
    //

    Prcb = KeGetCurrentPrcb();
    TargetSet = ReadForWriteAccess(&Prcb->SenderSummary);
    SetMember = Prcb->SetMember;
    while (TargetSet != 0) {
        SummarySet = TargetSet;
        BitScanForward64(&Processor, SummarySet);
        do {
            Source = KiProcessorBlock[Processor];
            RequestMailbox = &Prcb->RequestMailbox[Processor];
            Request.Summary = RequestMailbox->RequestSummary;

            //
            // If the request type is flush multiple immediate, flush process,
            // flush single, or flush all, then packet done can be signaled
            // before processing the request. Otherwise, the request type must
            // be a packet request, a cache invalidate, or a flush multiple.
            //

            if (Request.IpiRequest <= IPI_FLUSH_ALL) {

                //
                // If the synchronization type is target set, then the IPI was
                // only between two processors and target set should be used
                // for synchronization. Otherwise, packet barrier is used for
                // synchronization.
                //
    
                if (Request.IpiSynchType == 0) {
                    if (SetMember == InterlockedXor64((PLONG64)&Source->TargetSet,
                                                      SetMember)) {
    
                        Source->PacketBarrier = 0;
                    }
    
                } else {
                    Source->TargetSet = 0;
                }

                if (Request.IpiRequest == IPI_FLUSH_MULTIPLE_IMMEDIATE) {
                    Number = Request.Count;
                    Virtual = &RequestMailbox->Virtual[0];
                    End = Virtual + Number;
                    do {
                        KiFlushSingleTb(*Virtual);
                        Virtual += 1;
                    } while (Virtual < End);

                } else if (Request.IpiRequest == IPI_FLUSH_PROCESS) {
                    KiFlushProcessTb();
        
                } else if (Request.IpiRequest == IPI_FLUSH_SINGLE) {
                    KiFlushSingleTb((PVOID)Request.Parameter);
        
                } else {

                    ASSERT(Request.IpiRequest == IPI_FLUSH_ALL);

                    KeFlushCurrentTb();
                }

            } else {

                //
                // If the request type is packet ready, then call the worker
                // function. Otherwise, the request must be either a flush
                // multiple or a cache invalidate.
                //
        
                if (Request.IpiRequest == IPI_PACKET_READY) {
                    Packet = Source;
                    if (Request.IpiSynchType != 0) {
                        Packet = (PKPRCB)((ULONG64)Source + 1);
                    }
    
                    RequestPacket = (PKREQUEST_PACKET)&RequestMailbox->RequestPacket;
                    (RequestPacket->WorkerRoutine)((PKIPI_CONTEXT)Packet,
                                                   RequestPacket->CurrentPacket[0],
                                                   RequestPacket->CurrentPacket[1],
                                                   RequestPacket->CurrentPacket[2]);
        
                } else {
                    if (Request.IpiRequest == IPI_FLUSH_MULTIPLE) {
                        Number = Request.Count;
                        Virtual = (PVOID *)Request.Parameter;
                        End = Virtual + Number;
                        do {
                            KiFlushSingleTb(*Virtual);
                            Virtual += 1;
                        } while (Virtual < End);

                    } else if (Request.IpiRequest == IPI_INVALIDATE_ALL) {
                        WritebackInvalidate();

                    } else {

                        ASSERT(FALSE);

                    }
        
                    //
                    // If the synchronization type is target set, then the IPI was
                    // only between two processors and target set should be used
                    // for synchronization. Otherwise, packet barrier is used for
                    // synchronization.
                    //
        
                    if (Request.IpiSynchType == 0) {
                        if (SetMember == InterlockedXor64((PLONG64)&Source->TargetSet,
                                                          SetMember)) {
        
                            Source->PacketBarrier = 0;
                        }
        
                    } else {
                        Source->TargetSet = 0;
                    }
                }
            }
            
            SummarySet ^= AFFINITY_MASK(Processor);
        } while (BitScanForward64(&Processor, SummarySet) != FALSE);

        //
        // Clear target set in sender summary.
        //

        TargetSet = 
            InterlockedExchangeAdd64((LONG64 volatile *)&Prcb->SenderSummary,
                                     -(LONG64)TargetSet) - TargetSet;
    }

#endif

    return;
}
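The closing InterlockedExchangeAdd64 is an idiom worth noting: each sender owns a distinct bit of SenderSummary, so atomically adding the negated processed set clears exactly those bits, and the returned pre-op value minus the processed set is whatever arrived in the meantime. A single-threaded sketch with hypothetical values (Windows only):

#include <windows.h>
#include <cassert>

int main()
{
    volatile LONG64 summary = 0x5;  // bits 0 and 2 pending
    LONG64 processed = 0x5;
    summary = summary | 0x2;        // a new request arrives (bit 1)
    LONG64 remaining =
        InterlockedExchangeAdd64(&summary, -processed) - processed;
    assert(remaining == 0x2 && summary == 0x2);
    return 0;
}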
Example #10
_ALWAYS_INLINE_ uint64_t _atomic_sub_impl(register uint64_t *pw, register uint64_t val) {
	return InterlockedExchangeAdd64((LONGLONG volatile *)pw, -(int64_t)val) - val;
}
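The trailing "- val" converts the returned pre-subtraction value into the post-subtraction one, giving this wrapper sub-and-fetch rather than fetch-and-sub semantics. A minimal check (Windows only, hypothetical values):

#include <windows.h>
#include <cassert>

int main()
{
    volatile LONG64 v = 10;
    LONG64 after = InterlockedExchangeAdd64(&v, -3) - 3; // returns 10; after == 7
    assert(after == v);
    return 0;
}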
Example #11
intptr_t atom_add(volatile intptr_t *dest, intptr_t incr)
{
	// Assumes 64-bit pointers; adding incr back to the returned pre-op
	// value yields the post-add value.
	return InterlockedExchangeAdd64((LONGLONG *)dest, incr) + incr;
}
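Example #12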
static inline Type fetch_add(volatile Type& storage, Type value) {
    return static_cast<Type>(InterlockedExchangeAdd64((__int64 *)&storage, (__int64)value));
}
Example #13
template<typename T> static T add(volatile T *scalar, T other) {
    return other + (T)InterlockedExchangeAdd64((volatile LONGLONG *)scalar, (LONGLONG)other);
}
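The template compiles for any T, but the LONGLONG cast is only sound when T is a 64-bit integer. A hypothetical hardened variant, not from the original source:

#include <windows.h>
#include <type_traits>

template <typename T>
static T add_checked(volatile T *scalar, T other) {
    static_assert(sizeof(T) == 8 && std::is_integral<T>::value,
                  "InterlockedExchangeAdd64 needs a 64-bit integer");
    // Post-add value: pre-op value plus the addend.
    return other + (T)InterlockedExchangeAdd64((volatile LONGLONG *)scalar,
                                               (LONGLONG)other);
}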