
// ==++==
//
//
// Copyright (c) 2006 Microsoft Corporation. All rights reserved.
//
// The use and distribution terms for this software are contained in the file
// named license.txt, which can be found in the root of this distribution.
// By using this software in any fashion, you are agreeing to be bound by the
// terms of this license.
//
// You must not remove this notice, or any other, from this software.
//
//
// ==--==
/*============================================================
**
** Class:  MemoryFailPoint
**
**
** Provides a way for an app to not start an operation unless
** there's a reasonable chance there's enough memory
** available for the operation to succeed.
**
**
===========================================================*/
using System;
using System.IO;
using Microsoft.Win32;
using System.Runtime.InteropServices;
using System.Threading;
using System.Runtime.CompilerServices;
using System.Runtime.ConstrainedExecution;
using System.Security.Permissions;
using System.Runtime.Versioning;
/*
  This class allows an application to fail before starting certain
  activities.  The idea is to fail early instead of failing in the middle
  of some long-running operation, to increase the survivability of the
  application and ensure you don't have to write tricky code to handle an
  OOM anywhere in your app's code (which implies state corruption, meaning
  you should unload the appdomain if you have a transacted environment, to
  ensure rollback of individual transactions).  This is an incomplete tool
  to attempt hoisting all your OOM failures from anywhere in your worker
  methods to one particular point where it is easier to handle an OOM
  failure, and you can optionally choose not to start a work item if it
  will likely fail.  This does not help the performance of your code
  directly (other than helping to avoid AD unloads).  The point is to
  avoid starting work if it is likely to fail.
  The Enterprise Services team has used these memory gates effectively in
  the unmanaged world for a decade.
  In Whidbey, we will simply check to see if there is enough memory
  available in the OS's page file & attempt to ensure there might be
  enough space free within the process's address space (checking for
  address space fragmentation as well).  We will not commit or reserve any
  memory.  To avoid races with other threads using MemoryFailPoints, we'll
  also keep track of a process-wide amount of memory "reserved" via all
  currently-active MemoryFailPoints.  This has two problems:
      1) This can account for memory twice.  If a thread creates a
         MemoryFailPoint for 100 MB then allocates 99 MB, we'll see 99 MB
         less free memory and 100 MB less reserved memory.  Yet,
         subtracting off the 100 MB is necessary because the thread may
         not have started allocating memory yet.  Disposing of this class
         immediately after front-loaded allocations have completed is a
         great idea.
      2) This is still vulnerable to races with other threads that don't
         use MemoryFailPoints.
  So this class is far from perfect.  But it may be good enough to
  meaningfully reduce the frequency of OutOfMemoryExceptions in managed apps.
  In Orcas or later, we might allocate some memory from the OS and add it
  to an allocation context for this thread.  Obviously, at that point we
  need some way of conveying when we release this block of memory.  So, we
  implemented IDisposable on this type in Whidbey and expect all users to
  call this from within a using block to provide lexical scope for their
  memory usage.  The call to Dispose (implicit with the using block) will
  give us an opportunity to release this memory, perhaps.  We anticipate
  this will give us the possibility of a more effective design in a future
  version.
  In Orcas, we may also need to differentiate between allocations that
  would go into the normal managed heap vs. the large object heap, or we
  should consider checking for enough free space in both locations (with
  any appropriate adjustments to ensure the memory is contiguous).
*/
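/*
  A minimal usage sketch (not part of the original file): callers are
  expected to wrap the gated work in a using block so that Dispose releases
  the "reservation" as soon as the front-loaded allocations finish.  On
  builds where the constructor performs real checks, a failed gate surfaces
  as an InsufficientMemoryException rather than an OutOfMemoryException in
  the middle of the operation.  The 100 MB estimate and the ProcessWorkItem
  method below are hypothetical.

      try
      {
          using (MemoryFailPoint gate = new MemoryFailPoint(100))
          {
              // Work expected to need roughly 100 MB of memory.
              ProcessWorkItem();
          }
      }
      catch (InsufficientMemoryException)
      {
          // The gate predicted failure; postpone or reject the work item
          // instead of risking state corruption partway through it.
      }
*/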
namespace System.Runtime
{
    public sealed class MemoryFailPoint : CriticalFinalizerObject, IDisposable
    {
        // Find the top section of user mode memory. Avoid the last 64K.
        // Windows reserves that block for the kernel, apparently, and doesn't
        // let us ask about that memory. But since we ask for memory in 1 MB
        // chunks, we don't have to special case this. Also, we need to
        // deal with 32 bit machines in 3 GB mode.
        // Using Win32's GetSystemInfo should handle all this for us.
        private static readonly ulong TopOfMemory;
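
        /*
          A rough sketch (not part of this file) of how TopOfMemory could be
          derived on Windows via Win32's GetSystemInfo.  The struct layout
          and P/Invoke below are illustrative only; in this build the value
          actually comes from the GetMemorySettings internal call further
          down.

              [StructLayout(LayoutKind.Sequential)]
              struct SYSTEM_INFO
              {
                  internal uint dwOemId;
                  internal uint dwPageSize;
                  internal IntPtr lpMinimumApplicationAddress;
                  internal IntPtr lpMaximumApplicationAddress;
                  internal IntPtr dwActiveProcessorMask;
                  internal uint dwNumberOfProcessors;
                  internal uint dwProcessorType;
                  internal uint dwAllocationGranularity;
                  internal ushort wProcessorLevel;
                  internal ushort wProcessorRevision;
              }

              [DllImport("kernel32.dll")]
              static extern void GetSystemInfo(out SYSTEM_INFO info);

              // TopOfMemory would then come from info.lpMaximumApplicationAddress,
              // which already reflects /3GB mode and excludes the reserved
              // region at the top of user mode.
        */
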

        // Walking the address space is somewhat expensive, taking around half
        // a millisecond. Doing that per transaction limits us to a max of
        // ~2000 transactions/second. Instead, let's do this address space
        // walk once every 10 seconds, or when we will likely fail. This
        // amortization scheme can reduce the cost of a memory gate by about
        // a factor of 100.
        private static long LastKnownFreeAddressSpace = 0;
        private static long LastTimeCheckingAddressSpace = 0;
        private const int CheckThreshold = 10 * 1000;  // 10 seconds
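
        /*
          A sketch of the amortization described above; the WalkFreeAddressSpace
          helper is hypothetical (the real address-space walk happens inside
          the VM), and "size" stands for the requested number of bytes from
          the constructor.  The expensive walk is only repeated when the
          cached value is stale or looks too small for the current request:

              int now = Environment.TickCount;
              if (now - LastTimeCheckingAddressSpace > CheckThreshold ||
                  LastKnownFreeAddressSpace < (long)size)
              {
                  // Expensive (~0.5 ms) scan of the virtual address space.
                  LastKnownFreeAddressSpace = WalkFreeAddressSpace();
                  LastTimeCheckingAddressSpace = now;
              }
              bool likelyEnoughRoom = LastKnownFreeAddressSpace >= (long)size;
        */
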
        // When allocating memory segment by segment, we've hit cases where
        // only 22 MB of memory is available on the machine, we need one
        // 16 MB segment, and the OS still does not succeed in giving us
        // that memory. Reasons for this could include:
        // 1) The GC does allocate memory when doing a collection.
        // 2) Another process on the machine could grab that memory.
        // 3) Some other part of the runtime might grab this memory.
        // If we build in a little padding, we can help protect
        // ourselves against some of these cases, and we want to err on the
        // conservative side with this class.
        private const int LowMemoryFudgeFactor = 16 << 20;

        // Note: This may become dynamically tunable in the future.
        // Also note that we can have different segment sizes for the normal vs.
        // large object heap. We currently use the max of the two.
        private static readonly uint GCSegmentSize;

        // For multi-threaded workers, we want to ensure that if two workers
        // use a MemoryFailPoint at the same time, and they both succeed, that
        // they don't trample over each other's memory. Keep a process-wide
        // count of "reserved" memory, and decrement this in Dispose and
        // in the critical finalizer. See
        // SharedStatics.MemoryFailPointReservedMemory

        private ulong _reservedMemory;  // The size of this request (from user)
        private bool _mustSubtractReservation;  // Did we add data to SharedStatics?
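
        /*
          SharedStatics.AddMemoryFailPointReservation is internal to the
          runtime; conceptually it is just an interlocked, process-wide
          running total that each gate adds to when it succeeds and subtracts
          from in Dispose or the critical finalizer.  A sketch under that
          assumption (field and signature are hypothetical):

              private static long _memoryFailPointReservedMemory;

              internal static long AddMemoryFailPointReservation(long size)
              {
                  // Returns the new process-wide total of "reserved" bytes;
                  // callers pass a negative size to release a reservation.
                  return Interlocked.Add(ref _memoryFailPointReservedMemory, size);
              }
        */
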
        static MemoryFailPoint()
        {
            GetMemorySettings(out GCSegmentSize, out TopOfMemory);
        }

        // We can remove this link demand in a future version - we will
        // have scenarios for this in partial trust in the future, but
        // we're doing this just to restrict this in case the code below
        // is somehow incorrect.
        [SecurityPermission(SecurityAction.LinkDemand, UnmanagedCode = true)]
        [ResourceExposure(ResourceScope.None)]  // Just gives info about free mem.
        [ResourceConsumption(ResourceScope.Process, ResourceScope.Process)]
        public MemoryFailPoint(int sizeInMegabytes)
        {
            if (sizeInMegabytes <= 0)
                throw new ArgumentOutOfRangeException("sizeInMegabytes", Environment.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));

            ulong size = ((ulong)sizeInMegabytes) << 20;
            _reservedMemory = size;
            // Allow the fail point to succeed here, since we don't yet have a
            // platform-independent way of checking the amount of free memory.
            // We could allocate a byte[] then discard it, but we don't want
            // to generate garbage like that.
        }
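
        /*
          For comparison, a Windows-oriented build could gate on the page
          file at this point before charging the reservation.  A hedged
          sketch using Win32's GlobalMemoryStatusEx follows; it is not the
          actual desktop CLR implementation, just an illustration of the
          kind of check described in the design comment above.

              [StructLayout(LayoutKind.Sequential)]
              struct MEMORYSTATUSEX
              {
                  internal uint dwLength;
                  internal uint dwMemoryLoad;
                  internal ulong ullTotalPhys;
                  internal ulong ullAvailPhys;
                  internal ulong ullTotalPageFile;
                  internal ulong ullAvailPageFile;
                  internal ulong ullTotalVirtual;
                  internal ulong ullAvailVirtual;
                  internal ulong ullAvailExtendedVirtual;
              }

              [DllImport("kernel32.dll", SetLastError = true)]
              static extern bool GlobalMemoryStatusEx(ref MEMORYSTATUSEX buffer);

              MEMORYSTATUSEX status = new MEMORYSTATUSEX();
              status.dwLength = (uint)Marshal.SizeOf(typeof(MEMORYSTATUSEX));
              if (GlobalMemoryStatusEx(ref status) &&
                  status.ullAvailPageFile < size + (ulong)LowMemoryFudgeFactor)
              {
                  // Not enough room in the page file for this request plus
                  // the fudge factor; fail early.
                  throw new InsufficientMemoryException();
              }
        */
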

        [MethodImpl(MethodImplOptions.InternalCall)]
        private static extern void GetMemorySettings(out uint maxGCSegmentSize, out ulong topOfMemory);

        ~MemoryFailPoint()
        {
            Dispose(false);
        }

        // Applications must call Dispose, which conceptually "releases" the
        // memory that was "reserved" by the MemoryFailPoint. In this version,
        // that affects a global count of reserved memory (helping to throttle
        // future MemoryFailPoints). We may in the future create an allocation
        // context and release it in the Dispose method. While the finalizer
        // will eventually free this block of memory, apps will help their
        // performance greatly by calling Dispose.
        public void Dispose()
        {
            Dispose(true);
            GC.SuppressFinalize(this);
        }

        [ReliabilityContract(Consistency.WillNotCorruptState, Cer.Success)]
        private void Dispose(bool disposing)
        {
            // This is just bookkeeping to ensure multiple threads can really
            // get enough memory, and this does not actually reserve memory
            // within the GC heap.
            if (_mustSubtractReservation) {
                RuntimeHelpers.PrepareConstrainedRegions();
                try {
                }
                finally {
                    SharedStatics.AddMemoryFailPointReservation(-((long)_reservedMemory));
                    _mustSubtractReservation = false;
                }
            }

            /*
            Interlocked.Add(ref LastKnownFreeAddressSpace, _reservedMemory);
            */
        }

        #if _DEBUG
        [Serializable()]
        internal sealed class MemoryFailPointState
        {
            private int _allocationSizeInMB;
            private ulong _segmentSize;
            private bool _needPageFile;
            private bool _needAddressSpace;
            private bool _needContiguousVASpace;
            private ulong _availPageFile;
            private ulong _totalFreeAddressSpace;
            private long _lastKnownFreeAddressSpace;
            private ulong _reservedMem;

            internal MemoryFailPointState(int allocationSizeInMB, ulong segmentSize, bool needPageFile, bool needAddressSpace, bool needContiguousVASpace, ulong availPageFile, ulong totalFreeAddressSpace, long lastKnownFreeAddressSpace, ulong reservedMem)
            {
                _allocationSizeInMB = allocationSizeInMB;
                _segmentSize = segmentSize;
                _needPageFile = needPageFile;
                _needAddressSpace = needAddressSpace;
                _needContiguousVASpace = needContiguousVASpace;
                _availPageFile = availPageFile;
                _totalFreeAddressSpace = totalFreeAddressSpace;
                _lastKnownFreeAddressSpace = lastKnownFreeAddressSpace;
                _reservedMem = reservedMem;
            }

            public override string ToString()
            {
                return String.Format(System.Globalization.CultureInfo.InvariantCulture,
                    "MemoryGate: Checking for {0} MB, for allocation size of {1} MB. Need page file? {2} Need Address Space? {3} Need Contiguous address space? {4} Avail page file: {5} MB Total free VA space: {6} MB Contiguous free address space (found): {7} MB Reserved space: {8} MB",
                    _segmentSize >> 20, _allocationSizeInMB, _needPageFile,
                    _needAddressSpace, _needContiguousVASpace,
                    _availPageFile >> 20, _totalFreeAddressSpace >> 20,
                    _lastKnownFreeAddressSpace >> 20, _reservedMem);
            }
        }
        #endif
    }
}
