// singrdk/base/Kernel/Singularity/Memory/Stacks.cs

////////////////////////////////////////////////////////////////////////////////
//
// Microsoft Research Singularity
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// Primitive stack segment manager
//
#if !SINGULARITY_LINKED_STACKS
#define USE_BIG_STACKS
#endif
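
// In other words, unless SINGULARITY_LINKED_STACKS is defined, each thread
// gets one big initial stack segment, and any attempt to grow a further
// linked segment is treated as unexpected (see the DebugStub.Break() check
// in GetStackSegment below).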

//#define DEBUG_STACK_VERBOSE
//#define DO_TRACE_STACKS

namespace Microsoft.Singularity.Memory
{
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Threading;
using System.GCs;
using Microsoft.Singularity;
using Microsoft.Singularity.Isal;

internal partial class Stacks {
private static GetKernelStackCallback getKernelStackCallback;
private static GetSipStackCallback getSipStackCallback;
private static ReturnKernelStackCallback returnKernelStackCallback;
private static ReturnSipStackCallback returnSipStackCallback;

// This constant gives a reasonable size for an initial stack
// chunk, leaving room for the metadata that will be added to
// the top of the stack (sizeof(StackHead)).
#if USE_BIG_STACKS
internal const int InitialStackSize = 0xfb00;
internal const int SafetyBufferSize = 0x0400;
#elif ISA_IX64 || ISA_ARM || PHXBRIDGE
// TODO: FIXFIX set back to 0x0f00
internal const int InitialStackSize = 0xfb00;
internal const int SafetyBufferSize = 0x0400;
#else
internal const int InitialStackSize = 0x0f00;
internal const int SafetyBufferSize = 0x0000;
#endif
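
// A worked example (assuming the usual 4 KiB pages): with USE_BIG_STACKS,
// GetStackSegmentRaw below computes
//   PagePad(0xfb00 + sizeof(StackHead) + 0x0400)
// which rounds up to 0x10000 bytes, i.e. a sixteen-page chunk with the
// StackHead at the very top and the safety buffer at the very bottom.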
[StructLayout(LayoutKind.Sequential)]
internal struct StackHead
{
internal UIntPtr prevBegin;
internal UIntPtr prevLimit;
internal UIntPtr esp;
};
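
// The StackHead record sits at the very top of each stack segment:
// prevBegin and prevLimit save the bounds of the previous segment so they
// can be reinstated when this segment is returned; esp is a slot for a
// saved stack pointer (it is simply zeroed when a segment is created here).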

internal static void Initialize()
{
Tracing.Log(Tracing.Debug, "Stacks.Initialize() called");
getKernelStackCallback = new GetKernelStackCallback();
getSipStackCallback = new GetSipStackCallback();
returnKernelStackCallback = new ReturnKernelStackCallback();
returnSipStackCallback = new ReturnSipStackCallback();
}

internal static void Finalize()
{
Tracing.Log(Tracing.Debug, "Stacks.Finalize() KernelStacks");
Tracing.Log(Tracing.Debug, "Stacks.Finalize()");
}

private class GetKernelStackCallback : Isa.ICallback
{
internal override UIntPtr Callback(UIntPtr param)
{
VTable.Assert(Isa.IsRunningOnInterruptStack);
unsafe {
return GetStackSegment(param,
ref *Processor.GetCurrentThreadContext(), true, false);
}
}
}

private class GetSipStackCallback : Isa.ICallback
{
internal override UIntPtr Callback(UIntPtr param)
{
VTable.Assert(Isa.IsRunningOnInterruptStack);
unsafe {
UIntPtr stack = GetStackSegment(param,
ref *Processor.GetCurrentThreadContext(),
false, false);
if (stack == 0) {
// Allocate from the kernel reservation so we may terminate the SIP
stack = GetStackSegment(param, ref *Processor.GetCurrentThreadContext(),
true, false);
// Note that even if we failed again and are returning null, we
// must return before any overflow handling logic, to get off
// the interrupt stack.
}
return stack;
}
}
}

private class ReturnKernelStackCallback : Isa.ICallback
{
internal override UIntPtr Callback(UIntPtr param)
{
VTable.Assert(Isa.IsRunningOnInterruptStack);
unsafe {
ReturnStackSegmentRawCommon(ref *Processor.GetCurrentThreadContext(),
true, false);
}
return 0;
}
}

private class ReturnSipStackCallback : Isa.ICallback
{
internal override UIntPtr Callback(UIntPtr param)
{
VTable.Assert(Isa.IsRunningOnInterruptStack);
unsafe {
ReturnStackSegmentRawCommon(ref *Processor.GetCurrentThreadContext(),
false, false);
}
return 0;
}
}
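
// All four Isa.ICallback classes above run on the interrupt stack (hence
// the asserts). Stack segment allocation and release are funneled through
// Isa.CallbackOnInterruptStack so that this code never runs on the very
// segment it is about to create or free.
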
[NoStackLinkCheckTrans]
internal static UIntPtr GetSipStackSegment(UIntPtr size)
{
UIntPtr stack;
// @TODO: Historically we have disabled interrupts around stack growth.
// Actually I think it is unnecessary; however to be conservative for
// now we will disable interrupts while we use the interrupt stack.
bool en = Processor.DisableInterrupts();
try {
unsafe {
// Sanity check: we allocate from the current stack segment, and
// will set the thread context to point to a new stack segment
VTable.Assert(Isa.GetStackPointer() <=
Processor.GetCurrentThreadContext()->stackBegin);
VTable.Assert(Isa.GetStackPointer() >=
Processor.GetCurrentThreadContext()->stackLimit);
}
stack = Isa.CallbackOnInterruptStack(getSipStackCallback, size);
}
finally {
Processor.RestoreInterrupts(en);
}
return stack;
}

[NoStackLinkCheckTrans]
internal static UIntPtr GetKernelStackSegment(UIntPtr size)
{
UIntPtr stack;
// @TODO: see note about disabling interrupts above.
bool en = Processor.DisableInterrupts();
try {
unsafe {
// Sanity check: we allocate from the current stack segment, and
// will set the thread context to point to a new stack segment
VTable.Assert(Isa.GetStackPointer() <=
Processor.GetCurrentThreadContext()->stackBegin);
VTable.Assert(Isa.GetStackPointer() >=
Processor.GetCurrentThreadContext()->stackLimit);
}
stack = Isa.CallbackOnInterruptStack(getKernelStackCallback, size);
}
finally {
Processor.RestoreInterrupts(en);
}
return stack;
}

//
// This is called for each new thread to get the initial stack segment.
//
[NoStackLinkCheckTrans]
internal static UIntPtr GetInitialStackSegment(ref ThreadContext context)
{
// The first stack segment is always in kernel memory
UIntPtr ret = GetStackSegment(0, ref context, true, true);
return ret;
}

[NoStackLinkCheckTrans]
internal static unsafe UIntPtr GetStackSegment(UIntPtr size,
ref ThreadContext context,
bool kernelAllocation,
bool initialStack)
{
#if !SINGULARITY_LINKED_STACKS
if (!initialStack) {
// If we get here, then the initial stack size must not have
// been sufficient to ensure that we don't need linked stacks.
DebugStub.Break();
}
#endif
UIntPtr begin = context.stackBegin;
UIntPtr limit = context.stackLimit;

#if DO_TRACE_STACKS
Kernel.Waypoint(666);
#endif

StackHead *head = GetStackSegmentRaw(size, ref context, kernelAllocation, initialStack);

if (head != null) {
head->prevBegin = begin;
head->prevLimit = limit;
head->esp = 0;
}
return (UIntPtr)head;
}

[NoStackLinkCheckTrans]
internal static unsafe StackHead * GetStackSegmentRaw(UIntPtr size,
ref ThreadContext context,
bool kernelAllocation,
bool initialStack)
{
// Allocate a new chunk, making room for StackHead at the top.
// If you change these constants to add more data, see the
// comment about InitialStackSize at the top of this file!
#if DO_TRACE_STACKS
Kernel.Waypoint(667);
#endif
if (size == UIntPtr.Zero) {
size = InitialStackSize;
}

size = MemoryManager.PagePad(size + sizeof(StackHead) + SafetyBufferSize);

UIntPtr chunk;
Process owner = Process.GetProcessByID(context.processId);

//
//// NOTE: here's where we should be clever about
//// whether to allocate a stack chunk in the user range
//// or the kernel range. Except, if we switch contexts
//// during an ABI call while using a user-range stack
//// segment on a paging machine, we die. Gloss over
//// this hackily by always getting stack segments
//// from the kernel range.
//if (kernelAllocation || (owner == Process.kernelProcess)) {
// chunk = MemoryManager.KernelAllocate(
// MemoryManager.PagesFromBytes(size), owner, 0, PageType.Stack);
//}
//else {
// chunk = MemoryManager.UserAllocate(
// MemoryManager.PagesFromBytes(size), owner, 0, PageType.Stack);
//}
//
UIntPtr pageCount = MemoryManager.PagesFromBytes(size);
#if DEBUG_STACK_VERBOSE
fixed (ThreadContext *ptr = &context) {
Tracing.Log(Tracing.Debug,
"GetStackSegmentRaw(ctx={0:x8},size={1:d}) pages={2} [{3:x8}..{4:x8}]",
(UIntPtr)ptr, size, pageCount,
context.stackLimit, context.stackBegin);
}
#endif
chunk = MemoryManager.StackAllocate(pageCount, owner, 0, kernelAllocation, initialStack);

if (chunk != UIntPtr.Zero) {
// NB: We do _not_ zero out stack memory!
// We assume that Bartok prevents access to prev contents.
StackHead *head = (StackHead *)(chunk + size - sizeof(StackHead));
context.stackBegin = chunk + size;
context.stackLimit = chunk + SafetyBufferSize;

#if DEBUG_STACK_VERBOSE
Tracing.Log(Tracing.Debug,
"GetStackSegmentRaw(size={0:d}) -> [{1:x8}..{2:x8}]",
size, context.stackLimit, context.stackBegin);
#endif
return head;
}
else {
// Stack allocation failed. In the future, we should
// trigger a kernel exception; for now, we break to the
// debugger.
#if DEBUG_STACK_VERBOSE
Tracing.Log(Tracing.Debug,
"GetStackSegmentRaw: KernelAllocate failed!(siz={0:d})",
size);
#endif
//DebugStub.Break();
return null;
}
}
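
// On success, the new segment is laid out as follows (a sketch):
//
//   chunk                                                 chunk + size
//   |<- SafetyBufferSize ->|<----- usable stack ----->|<- StackHead ->|
//                          ^ context.stackLimit                       ^ context.stackBegin
//
// The stack grows downward from the StackHead toward stackLimit; the
// safety buffer below stackLimit presumably exists to give the overflow
// machinery some headroom before an overrun reaches adjacent allocations.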

// This is called when returning a kernel stack segment
[AccessedByRuntime("referenced from halstack.asm")]
[NoStackOverflowCheck]
internal static void ReturnKernelStackSegment()
{
// @TODO: see note about disabling interrupts above.
bool en = Processor.DisableInterrupts();
try {
Isa.CallbackOnInterruptStack(returnKernelStackCallback, 0);
unsafe {
// Sanity check: we freed from the previous segment, and
// should have set the thread context to point to this segment now.
VTable.Assert(Isa.GetStackPointer() <=
Processor.GetCurrentThreadContext()->stackBegin);
VTable.Assert(Isa.GetStackPointer() >=
Processor.GetCurrentThreadContext()->stackLimit);
}
}
finally {
Processor.RestoreInterrupts(en);
}
}

// This is called when returning a stack segment allocated for a SIP
[AccessedByRuntime("referenced from halstack.asm")]
[NoStackOverflowCheck]
internal static void ReturnSipStackSegment()
{
// @TODO: see note about disabling interrupts above.
bool en = Processor.DisableInterrupts();
try {
Isa.CallbackOnInterruptStack(returnSipStackCallback, 0);
unsafe {
// Sanity check: we freed from the previous segment, and
// should have set the thread context to point to this segment now.
VTable.Assert(Isa.GetStackPointer() <=
Processor.GetCurrentThreadContext()->stackBegin);
VTable.Assert(Isa.GetStackPointer() >=
Processor.GetCurrentThreadContext()->stackLimit);
}
}
finally {
Processor.RestoreInterrupts(en);
}
}

[NoStackOverflowCheck]
internal static unsafe void ActivatePreviousStackSegmentLimit()
{
// To avoid sprinkling [NoStackOverflowCheck] attributes
// on too many methods, we manually inline a couple of methods.
// ThreadContext *context = Processor.GetCurrentThreadContext();
ThreadRecord *threadRecord = Isa.GetCurrentThread();
ThreadContext *context = (ThreadContext *) threadRecord;
StackHead *head = (StackHead *)
(context->stackBegin - sizeof(StackHead));
// Isa.StackLimit = head->prevLimit;
threadRecord->activeStackLimit = head->prevLimit;
}
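
// The manual inlining above reinstates the previous segment's limit as the
// thread's active overflow limit; presumably this runs just before the
// current segment is unlinked and returned.
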
[AccessedByRuntime("referenced from halstack.asm")]
[NoStackLinkCheckTrans]
internal static unsafe void ReturnStackSegmentRawCommon(ref ThreadContext context,
bool kernelAllocation,
bool initialStack)
{
UIntPtr begin = context.stackBegin;
UIntPtr limit = context.stackLimit;

StackHead *head = (StackHead *)(begin - sizeof(StackHead));

#if DO_TRACE_STACKS
Kernel.Waypoint(669);
#endif

UIntPtr addr = limit - SafetyBufferSize;
UIntPtr size = begin - limit + SafetyBufferSize;

#if DEBUG_STACK_VERBOSE
fixed (ThreadContext *ptr = &context) {
Tracing.Log(Tracing.Debug,
"ReturnStackSegmentRaw(ctx={0:x8}) [{1:x8}..{2:x8}]\n",
(UIntPtr)ptr, context.stackLimit, context.stackBegin);
}
#endif
#if !PAGING
context.stackBegin = head->prevBegin;
context.stackLimit = head->prevLimit;
#else
//context.stackBegin = head->prevBegin;
//context.stackLimit = head->prevLimit;
// Moved below, because of the following scenario:
// - call UnlinkStack
// - UnlinkStack switches to the scheduler stack
// - UnlinkStack calls ReturnStackSegmentRaw, which calls
// various other methods
// - one of the other methods invokes write barrier code
// - the write barrier code performs a stack link check
// - If context.stackLimit is already set to head->prevLimit,
// then it may appear that we're out of stack space,
// even if we're really not, so we jump to LinkStack
// - LinkStack overwrites the scheduler stack
// TODO: really fix this.
UIntPtr stackBegin = head->prevBegin;
UIntPtr stackLimit = head->prevLimit;
#endif
Process owner = Process.GetProcessByID(context.processId);

//
//// See note above in GetStackSegmentRaw
//if ((owner != Process.kernelProcess) &&
//(addr >= BootInfo.KERNEL_BOUNDARY)) {
//MemoryManager.UserFree(addr, MemoryManager.PagesFromBytes(size), owner);
//}
//else {
//MemoryManager.KernelFree(addr, MemoryManager.PagesFromBytes(size), owner);
//}
//
MemoryManager.StackFree(addr, MemoryManager.PagesFromBytes(size), owner, kernelAllocation, initialStack);

#if PAGING
// See comments above.
context.stackBegin = stackBegin;
context.stackLimit = stackLimit;
#endif

#if DEBUG_STACK_VERBOSE
Tracing.Log(Tracing.Debug,
"ReturnStackSegment({0:x8}, {1:x8}) [{2:x8}..{3:x8}]\n",
addr, size, context.stackLimit, context.stackBegin);
#endif
}
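
// Note that addr and size above exactly invert the allocation arithmetic in
// GetStackSegmentRaw: addr = stackLimit - SafetyBufferSize recovers the
// original chunk base, and size = stackBegin - stackLimit + SafetyBufferSize
// recovers the original page-padded chunk size.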

//
// This is called when a thread is destroyed and its last
// stack segment is returned to the system
//
[AccessedByRuntime("referenced from halstack.asm")]
[NoStackLinkCheckTrans]
// NB: This function must execute in low-stack conditions!
// See the comment at the top of this file.
internal static void ReturnInitialStackSegment(ref ThreadContext context)
{
ReturnStackSegmentRawCommon(ref context, true, true);
}

//
// This is called when cleaning up the orphaned stack segments of a thread
// when it is destroyed, usually as the result of an exception such as a
// SIP stack overflow.
//
[AccessedByRuntime("referenced from halstack.asm")]
[NoStackLinkCheckTrans]
// NB: This function must execute in low-stack conditions!
// See the comment at the top of this file.
internal static void ReturnStackSegment(ref ThreadContext context)
{
ReturnStackSegmentRawCommon(ref context, true, false);
}

//
// This is invoked by ring0_halstack.asm when a SIP stack overflows
// and no more memory can be allocated from the OS for it.
//
// It is expected that the SIP is "failed fast" and does not
// return from this call.
//
[ExternalEntryPoint]
[AccessedByRuntime("reference from halstack.asm")]
internal static void StackOverflowForSIP()
{
DebugStub.WriteLine("******** SIP OOM on Stack, Failing Fast ********");
// This does not make a stack transition record
Thread.CurrentProcess.Stop((int)System.ProcessExitCode.ErrorDefault);
// Should not return
DebugStub.Break();
}

internal static unsafe void WalkStack(UIntPtr ebp)
{
System.GCs.CallStack.TransitionRecord *kernMarker;
System.GCs.CallStack.TransitionRecord *procMarker;
kernMarker = Processor.GetCurrentThreadContext()->stackMarkers;
procMarker = Processor.GetCurrentThreadContext()->processMarkers;

UIntPtr ebpKern = kernMarker != null ? kernMarker->calleeSaveRegisters.GetFramePointer() : UIntPtr.Zero;
UIntPtr ebpProc = procMarker != null ? procMarker->calleeSaveRegisters.GetFramePointer() : UIntPtr.Zero;

#if DEBUG_STACK_VERBOSE
fixed (byte * begin = &LinkStackBegin) {
fixed (byte * limit = &LinkStackLimit) {
DebugStub.Print("LinkStack: {0:x8}..{1:x8}\n",
__arglist((UIntPtr)begin, (UIntPtr)limit));
}
}
#endif
DebugStub.Print("EBP={0:x8}, kernMarkers={1:x8}, procMarkers={2:x8}\n",
__arglist(ebp, (UIntPtr)kernMarker, (UIntPtr)procMarker));
DebugStub.Print("EBP.....: EBP..... EIP..... transitn nexttran stbottom\n");
while (ebp != UIntPtr.Zero) {
if (ebp == ebpKern) {
DebugStub.Print("--kern--: {0:x8} {1:x8} {2:x8} {3:x8} {4:x8}\n",
__arglist(ebpKern,
(UIntPtr)kernMarker,
kernMarker->callAddr,
(UIntPtr)kernMarker->oldTransitionRecord,
kernMarker->stackBottom));
kernMarker = kernMarker->oldTransitionRecord;
ebpKern = kernMarker != null ? kernMarker->calleeSaveRegisters.GetFramePointer() : UIntPtr.Zero;
}
if (ebp == ebpProc) {
DebugStub.Print("--proc--: {0:x8} {1:x8} {2:x8} {3:x8} {4:x8}: \n",
__arglist(ebpProc,
(UIntPtr)procMarker,
procMarker->callAddr,
(UIntPtr)procMarker->oldTransitionRecord,
procMarker->stackBottom));
procMarker = procMarker->oldTransitionRecord;
ebpProc = procMarker != null ? procMarker->calleeSaveRegisters.GetFramePointer() : UIntPtr.Zero;
}
DebugStub.Print("{0:x8}: {1:x8} {2:x8}\n",
__arglist(ebp,
((UIntPtr*)ebp)[0], ((UIntPtr*)ebp)[1]));
if (((UIntPtr*)ebp)[1] == UIntPtr.Zero) {
break;
}
ebp = ((UIntPtr*)ebp)[0];
}
// DebugStub.Break();
}
}
}