///////////////////////////////////////////////////////////////////////////////
//
//  Microsoft Research Singularity
//
//  Copyright (c) Microsoft Corporation. All rights reserved.
//
//  File:   Libraries\Resiliency\JournalProducer.sg
//
//  Note:   Logging Worker Creator Template
//

using System;
using System.Collections;
using System.Collections.Specialized;
using System.Threading;
using Microsoft.SingSharp;
using Microsoft.SingSharp.Reflection;
using Microsoft.Singularity;
using Microsoft.Singularity.Channels;
using Microsoft.Singularity.Configuration;
using Microsoft.Singularity.Directory;
using Microsoft.Singularity.Services;
using Microsoft.Singularity.ServiceManager;

namespace Microsoft.Singularity.Resiliency
{
    public abstract class JournalProducer
    {
        protected Thread dsThread;
        protected Thread managerThread;
        protected Thread providerThread;

        protected TRef<DirectoryServiceContract.Imp:Ready> rootDsRef;
        protected TRef<DirectoryServiceContract.Exp:Ready> myDsRef;
        protected TRef<ServiceProviderContract.Imp:Start> providerRef;
        protected TRef<ServiceProviderContract.Exp:Start> myProviderRef;
        protected TRef<ManagedServiceContract.Exp:Ready> serviceRef;
        protected TRef<ServiceControlContract.Imp:Ready> controlRef;
        protected TRef<ManagedProxyContract.Imp:Ready> managerRef;

        protected TRef<ThreadTerminationContract.Imp:Start> dsSignalSenderRef;
        protected TRef<ThreadTerminationContract.Exp:Start> dsSignalReceiverRef;
        protected TRef<ThreadTerminationContract.Imp:Start> mgSignalSenderRef;
        protected TRef<ThreadTerminationContract.Exp:Start> mgSignalReceiverRef;
        protected TRef<ThreadTerminationContract.Imp:Start> spSignalSenderRef;
        protected TRef<ThreadTerminationContract.Exp:Start> spSignalReceiverRef;
        protected TRef<ThreadTerminationContract.Imp:Start> recoverySignalSenderRef;
        protected TRef<ThreadTerminationContract.Exp:Start> recoverySignalReceiverRef;

        protected IList! journaletList;
        protected Object! journaletListLock;

        public JournalProducer()
        {
            this.journaletList = new ArrayList();
            this.journaletListLock = new Object();
        }

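        // Summary of how the ThreadTerminationContract pairs above are wired
        // between the worker threads created in Start() (derived from the
        // Acquire() calls in the thread bodies below):
        //
        //   mgSignal*        DirectoryServiceThread -> ManagerThread
        //                    (DS thread asks the manager to obtain a fresh
        //                    directory channel when recovery is needed)
        //   dsSignal*        ManagerThread -> DirectoryServiceThread
        //                    (manager tells the DS thread to stop)
        //   spSignal*        DirectoryServiceThread -> ProviderThread
        //                    (DS thread tells the provider thread to stop)
        //   recoverySignal*  ProviderThread -> DirectoryServiceThread
        //                    (provider thread reports a lost service channel
        //                    and triggers recovery)
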
        /// <summary>
        /// Checks whether the type of this endpoint is one that the subclass
        /// can handle.
        /// </summary>
        protected abstract bool Accept(ServiceContract.Exp:Start! ep);

        /// <summary>
        /// Substitutes the ServiceContract with a new one in order to
        /// intercept the messages exchanged through the given ServiceContract.
        /// </summary>
        protected abstract void Substitute([Claims]ServiceContract.Exp:Start! ep,
                                           out ServiceContract.Exp! newEp);

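        // A minimal sketch of how a concrete producer might implement the two
        // hooks above. "FooContract" and "FooJournalet" are hypothetical names
        // used only for illustration; they are not part of this library.
        //
        //     protected override bool Accept(ServiceContract.Exp:Start! ep)
        //     {
        //         // Accept only endpoints of the contract this producer logs.
        //         return ep is FooContract.Exp;
        //     }
        //
        //     protected override void Substitute([Claims]ServiceContract.Exp:Start! ep,
        //                                        out ServiceContract.Exp! newEp)
        //     {
        //         // Interpose a journalet between the client endpoint (ep) and
        //         // the service: the journalet keeps ep plus the Imp side of a
        //         // fresh channel, and the Exp side is handed back so that the
        //         // ProviderThread can forward it to the real service.
        //         FooContract.Imp! imp;
        //         FooContract.Exp! exp;
        //         FooContract.NewChannel(out imp, out exp);
        //         RegisterJournalet(new FooJournalet(ep, imp));
        //         newEp = exp;
        //     }
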
        public void RegisterJournalet(Journalet j)
        {
            if (j != null) {
                lock (journaletListLock) {
                    try {
                        journaletList.Add(j);
                    }
                    catch (Exception) {}
                }
            }
        }

        public void DeregisterJournalet(Journalet j)
        {
            if (j != null) {
                lock (journaletListLock) {
                    try {
                        journaletList.Remove(j);
                    }
                    catch (Exception) {}
                }
            }
        }

        /// <summary>
        /// Launches the JournalProducer. The management thread is started
        /// here.
        ///
        /// JournalProducer is initialized with three channels:
        ///   mep  Channel from the service manager. JournalProducer is a
        ///        kind of service, so it has to be controlled by the service
        ///        manager.
        ///   dep  Channel to the (root) directory service. This channel is
        ///        necessary to intercept the connection request from a client
        ///        to the server that JournalProducer supports.
        ///   fep  Channel from the target service. This channel pretends to be
        ///        the root directory service.
        ///
        /// NOTE: pep is a temporary solution so that JournalProducer receives
        /// a new DS channel during the service recovery.
        /// </summary>
        public void Start([Claims]ManagedServiceContract.Exp:Start! mep,
                          [Claims]DirectoryServiceContract.Imp:Ready! dep,
                          [Claims]DirectoryServiceContract.Exp:Start! fep,
                          [Claims]ManagedProxyContract.Imp:Start! pep)
        {
            ThreadTerminationContract.Imp! sender;
            ThreadTerminationContract.Exp! receiver;

            //
            // The root directory service
            //
            rootDsRef = new TRef<DirectoryServiceContract.Imp:Ready>(dep);

            //
            // Initialize connection to the service manager
            //
            mep.SendSuccess();
            serviceRef = new TRef<ManagedServiceContract.Exp:Ready>(mep);
            ThreadTerminationContract.NewChannel(out sender, out receiver);
            mgSignalSenderRef = new TRef<ThreadTerminationContract.Imp:Start>(sender);
            mgSignalReceiverRef = new TRef<ThreadTerminationContract.Exp:Start>(receiver);

            //
            // Initialize my directory service
            //
            fep.SendSuccess();
            myDsRef = new TRef<DirectoryServiceContract.Exp:Ready>(fep);

            //
            // Initialize the channel to the service process
            //
            // xep.SendSuccess();
            // proxyRef = new TRef<ProxyContract.Exp:Ready>(xep);

            //
            // Set up another connection for recovery to the service manager
            //
            switch receive {
                case pep.Success():
                    break;
                case unsatisfiable:
                    DebugStub.Break();
                    break;
            }
            managerRef = new TRef<ManagedProxyContract.Imp:Ready>(pep);
            ThreadTerminationContract.NewChannel(out sender, out receiver);
            recoverySignalSenderRef = new TRef<ThreadTerminationContract.Imp:Start>(sender);
            recoverySignalReceiverRef = new TRef<ThreadTerminationContract.Exp:Start>(receiver);

            //
            // Set up the service controller
            //
            /*
            cep.RecvSuccess();
            controlRef = new TRef<ServiceControlContract.Imp:Ready>(cep);
            */

            //
            // Set up the thread terminator for the substitute DS thread
            //
            ThreadTerminationContract.NewChannel(out sender, out receiver);
            dsSignalSenderRef = new TRef<ThreadTerminationContract.Imp:Start>(sender);
            dsSignalReceiverRef = new TRef<ThreadTerminationContract.Exp:Start>(receiver);

            //
            // Set up the thread terminator for the producer thread
            //
            ThreadTerminationContract.NewChannel(out sender, out receiver);
            spSignalSenderRef = new TRef<ThreadTerminationContract.Imp:Start>(sender);
            spSignalReceiverRef = new TRef<ThreadTerminationContract.Exp:Start>(receiver);

            //
            // Create the substitute DS thread
            //
            dsThread = new Thread(new ThreadStart(DirectoryServiceThread));

            //
            // Create the journalet producer thread
            //
            providerThread = new Thread(new ThreadStart(ProviderThread));

            //
            // Create and start the service management thread
            //
            managerThread = new Thread(new ThreadStart(ManagerThread));
            managerThread.Start();
        }

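        // A rough sketch of how a hosting service process might hand its four
        // channels to a concrete producer. The producer type and the way the
        // endpoints are obtained are hypothetical; only the Start() signature
        // above is real.
        //
        //     JournalProducer producer = new FooJournalProducer(); // hypothetical subclass
        //     producer.Start(mep,   // ManagedServiceContract.Exp from the service manager
        //                    dep,   // DirectoryServiceContract.Imp to the root DS
        //                    fep,   // DirectoryServiceContract.Exp handed to the target service
        //                    pep);  // ManagedProxyContract.Imp used during recovery
        //
        // Start() returns after spawning the manager thread; the directory
        // service thread is started when the manager receives StartService,
        // and the provider thread is started once the registration with the
        // root DS is acknowledged.
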
        /// <summary>
        /// JournalProducer provides a directory service on behalf of the
        /// root DirectoryService so that it intercepts all outputs of
        /// a service process.
        ///
        /// This is the top-level logic of the directory service. It
        /// transitions back and forth between the normal state and the
        /// recovery state. This looks simple but is a bit tricky. The normal
        /// operation state mostly deals with the DirectoryServiceContract,
        /// but it also creates or deletes logging workers.
        ///
        /// The recovery operation state recovers the service registration
        /// state. A service process registers its ServiceProviderContract
        /// with the DirectoryService. JournalProducer intercepts this
        /// registration and registers another ServiceProviderContract with
        /// the DirectoryService. The recovery operation doesn't create and
        /// register a new ServiceProvider because one is already registered
        /// with the DirectoryService.
        /// </summary>
        /// NOTE: The current implementation doesn't support all protocols of
        /// the DirectoryServiceContract.
        protected void DirectoryServiceThread()
        {
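            // Shape of this thread (derived from the loop below): each pass
            // through HandleDirectoryService() runs the normal interception
            // state; when it returns true, the monitored service needs to be
            // recovered, so we signal the ManagerThread to fetch a fresh
            // directory channel and, once it acknowledges, run
            // RecoverDirectoryService() to re-accept the restarted service's
            // registration before looping again. A false return, or a failed
            // recovery, ends the thread.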
            ThreadTerminationContract.Imp:Start! signalToManager;

            signalToManager = mgSignalSenderRef.Acquire();
            for (;;) {
                if (!HandleDirectoryService()) {
                    break;
                }

                // Trigger the recovery
                signalToManager.SendStop();
                switch receive {
                    case signalToManager.AckStop():
                        // Transit to the recovery mode
                        if (!RecoverDirectoryService()) {
                            goto exit;
                        }
                        break;
                    case signalToManager.ChannelClosed():
                        // Recovery failed
                        goto exit;
                        break;
                }
            }
        exit:
            mgSignalSenderRef.Release(signalToManager);
        }

        /// <summary>
        /// Emulates a directory service.
        /// </summary>
        protected bool HandleDirectoryService()
        {
            // Flag for the recovery mode outside this method
            bool recovery = false;
            DirectoryServiceContract.Exp:Ready! myDS;
            DirectoryServiceContract.Imp:Ready! rootDS;
            ThreadTerminationContract.Exp:Start! signalFromManager;
            ThreadTerminationContract.Exp:Start! signalFromProvider;
            ThreadTerminationContract.Imp:Start! signalToProvider;

            myDS = myDsRef.Acquire();
            rootDS = rootDsRef.Acquire();
            signalToProvider = spSignalSenderRef.Acquire();
            signalFromManager = dsSignalReceiverRef.Acquire();
            signalFromProvider = recoverySignalReceiverRef.Acquire();

            for (;;) {
                switch receive {
                    //
                    // The following 4 cases are messages from the service
                    // process.
                    //
                    case myDS.Bind(path, exp):
                    {
                        DebugStub.Print("JP DS: Bind, break\n");
                        DebugStub.Break();
                        delete path;
                        delete exp;
                        break;
                    }
                    case myDS.Register(path, imp):
                    {
                        ServiceProviderContract.Imp! client;
                        ServiceProviderContract.Exp! server;

                        // HI: Current implementation can handle only one name
                        // per service.
                        DebugStub.Print("JP DS: Register @ '{0}'\n",
                                        __arglist(Bitter.ToString2(path)));
                        providerRef = new TRef<ServiceProviderContract.Imp:Start>(imp);
                        ServiceProviderContract.NewChannel(out client, out server);
                        myProviderRef = new TRef<ServiceProviderContract.Exp:Start>(server);
                        // Now register the JP's endpoint with the NS.
                        rootDS.SendRegister(path, client);
                        break;
                    }
                    case myDS.Deregister(path):
                    {
                        DebugStub.Print("JP DS: Deregister\n");
                        rootDS.SendDeregister(path);
                        break;
                    }
                    case myDS.ChannelClosed():
                    {
                        DebugStub.Print("JP DS: Server channel closed." +
                                        " Waits for provider's signal.\n");
                        switch receive {
                            case signalFromProvider.Stop():
                            {
                                signalFromProvider.SendAckStop();
                                recovery = true;
                                break;
                            }
                            case signalFromProvider.ChannelClosed():
                            {
                                recovery = true;
                                break;
                            }
                            case signalFromManager.Stop():
                            {
                                signalToProvider.SendStop();
                                signalToProvider.RecvAckStop();
                                delete myProviderRef.Acquire();
                                delete providerRef.Acquire();
                                providerRef = null;
                                signalFromManager.SendAckStop();
                                goto exit;
                                break;
                            }
                            case signalFromManager.ChannelClosed():
                            {
                                goto exit;
                                break;
                            }
                        }
                        DebugStub.Print("JP DS: Provider's signal received.\n");
                        goto exit;
                        break;
                    }

                    //
                    // The following messages are delivered from the DS.
                    //
                    case rootDS.AckRegister():
                    {
                        DebugStub.Print("JP DS: AckRegister\n");
                        myDS.SendAckRegister();
                        //
                        // HI: At this point the producer thread is allowed to
                        // start.
                        //
                        providerThread.Start();
                        break;
                    }
                    case rootDS.NakRegister(ep, error):
                    {
                        DebugStub.Print("JP DS: NakRegister\n");
                        delete ep;
                        delete myProviderRef.Acquire();
                        myDS.SendNakRegister(providerRef.Acquire(), error);
                        providerRef = null;
                        break;
                    }
                    case rootDS.NakRegisterReparse(path, rest, link, ep):
                    {
                        DebugStub.Print("JP DS: NakRegisterReparse\n");
                        delete ep;
                        delete myProviderRef.Acquire();
                        myDS.SendNakRegisterReparse(path, rest, link,
                                                    providerRef.Acquire());
                        providerRef = null;
                        break;
                    }
                    case rootDS.AckDeregister(ep):
                    {
                        DebugStub.Print("JP DS: AckDeregister\n");
                        delete ep;
                        signalToProvider.SendStop();
                        signalToProvider.RecvAckStop();
                        delete myProviderRef.Acquire();
                        myDS.SendAckDeregister(providerRef.Acquire());
                        providerRef = null;
                        break;
                    }
                    case rootDS.NakDeregister(error):
                    {
                        DebugStub.Print("JP DS: NakDeregister\n");
                        myDS.SendNakDeregister(error);
                        break;
                    }
                    case rootDS.NakDeregisterReparse(path, rest, link):
                    {
                        DebugStub.Print("JP DS: NakDeregisterReparse\n");
                        myDS.SendNakDeregisterReparse(path, rest, link);
                        break;
                    }
                    case rootDS.ChannelClosed():
                    {
                        DebugStub.Print("JP DS: DS channel closed. Break.\n");
                        // Emulate channel closing
                        delete providerRef.Acquire();
                        providerRef = null;
                        DebugStub.Break();
                        break;
                    }

                    case signalFromManager.Stop():
                    {
                        signalToProvider.SendStop();
                        signalToProvider.RecvAckStop();

                        delete myProviderRef.Acquire();
                        delete providerRef.Acquire();
                        providerRef = null;
                        signalFromManager.SendAckStop();
                        goto exit;
                        break;
                    }

                    //
                    // Signal from ProviderThread in this object. This is the
                    // trigger to start recovery.
                    //
                    case signalFromProvider.Stop():
                        DebugStub.Print("JP DS: Received signal." +
                                        " Start recovery.\n");
                        recovery = true;
                        signalFromProvider.SendAckStop();
                        goto exit;
                        break;
                }
            }
        exit:
            recoverySignalReceiverRef.Release(signalFromProvider);
            spSignalSenderRef.Release(signalToProvider);
            dsSignalReceiverRef.Release(signalFromManager);

            myDsRef.Release(myDS);
            rootDsRef.Release(rootDS);

            return recovery;
        } // HandleDirectoryService

        protected bool RecoverDirectoryService()
        {
            DirectoryServiceContract.Exp:Ready! substitute;

            DebugStub.Print("JP: ENTER RecoveryOperation\n");
            //
            // get a new Directory Service endpoint
            //
            substitute = myDsRef.Acquire();
            DebugStub.Print("JP: Get new substitute.");
            switch receive {
                case substitute.Register(path, imp):
                {
                    DebugStub.Print("JP DS Recovery: Re-register '{0}'\n",
                                    __arglist(Bitter.ToString2(path)));
                    //
                    // Refresh the server-side ServiceProviderContract
                    //
                    DebugStub.Print("JP: Replacing the server-side channel... ");
                    delete providerRef.Acquire();
                    providerRef.Release(imp);
                    DebugStub.Print("done\n");

                    //
                    // This time, we immediately send back the ack.
                    //
                    substitute.SendAckRegister();

                    delete path;
                    break;
                }
            }
            myDsRef.Release(substitute);

            new Thread(new ThreadStart(RecoveryThread)).Start();
            //DebugStub.Print("JP: EXIT RecoveryOperation\n");
            return true;
        }

        /// <summary>
        /// This thread communicates with the SMS and handles general
        /// service-management work.
        /// </summary>
        // Communicates with DirectoryServiceThread
        protected void ManagerThread()
        {
            ManagedServiceContract.Exp:Ready! manager;
            ManagedProxyContract.Imp:Ready! managerRecovery;
            ThreadTerminationContract.Imp:Start! signalToDs;
            ThreadTerminationContract.Exp:Start! signalFromDs;

            manager = serviceRef.Acquire();
            managerRecovery = managerRef.Acquire();
            signalToDs = dsSignalSenderRef.Acquire();
            signalFromDs = mgSignalReceiverRef.Acquire();

            for (;;) {
                //DebugStub.Print("JP Manager th: Set\n");
                switch receive {
                    case manager.StartService():
                    {
                        dsThread.Start();
                        manager.SendAckStartService();
                        //else {
                        //    manager.SendNakStartService();
                        //}
                        break;
                    }
                    case manager.StopService():
                    {
                        //DebugStub.Print("JPMan: StopService\n");
                        //providerThread.Stop();
                        manager.SendAckStopService();
                        break;
                    }
                    case manager.RestartService():
                    {
                        DebugStub.Print("JPMan: RestartService\n");
                        DebugStub.Break();
                        //providerThread.Stop();
                        //providerThread.Start();
                        manager.SendAckRestartService();
                        break;
                    }
                    case manager.Knock():
                    {
                        manager.SendAlive();
                        break;
                    }
                    case manager.Stop():
                    {
                        signalToDs.SendStop();
                        break;
                    }
                    case manager.Restart():
                    {
                        DebugStub.Break();
                        break;
                    }
                    case signalFromDs.Stop():
                    {
                        delete myDsRef.Acquire();
                        managerRecovery.SendRequestDSRecovery();
                        break;
                    }
                    // Receives a new DirectoryServiceContract for the
                    // restarted service process. JP provides the directory
                    // service again with this endpoint.
                    case managerRecovery.AckDSRecovery(directory):
                    {
                        directory.SendSuccess();
                        myDsRef.Release(directory);
                        signalFromDs.SendAckStop();
                        break;
                    }
                    case managerRecovery.NakDSRecovery():
                    {
                        goto exit;
                    }
                    case managerRecovery.ChannelClosed():
                    {
                        goto exit;
                    }
                    case signalToDs.AckStop():
                    {
                        manager.SendAckStop();
                        goto exit;
                        break;
                    }
                }
            }
        exit:
            delete manager;
            delete managerRecovery;
            delete signalFromDs;
            delete signalToDs;
        }

        /// <summary>
        /// Creates a Journalet for each client-server connection.
        /// </summary>
        protected void ProviderThread()
        {
            bool release = false;
            bool signal = false;
            bool recovery = false;

            // From the root directory service
            ServiceProviderContract.Exp:Start! providerFromRootDs;
            // To the resilient service
            ServiceProviderContract.Imp:Start! provider;
            ThreadTerminationContract.Imp:Start! dsSignal;
            ThreadTerminationContract.Exp:Start! signalFromDs;
            ThreadTerminationContract.Imp:Start! signalToDs;

            providerFromRootDs = myProviderRef.Acquire();
            signalFromDs = spSignalReceiverRef.Acquire();
            signalToDs = recoverySignalSenderRef.Acquire();

            //
            // lock the server
            //
            provider = providerRef.Acquire();

            for (;;) {
                switch receive {
                    case providerFromRootDs.Connect(ep):
                    {
                        //DebugStub.Print("JP Producer: received" +
                        //                " client connection request ... ");
                        if (Accept(ep)) {
                            //DebugStub.Print("accepted.\n");
                            ServiceContract.Exp! newExp;

                            Substitute(ep, out newExp);
                            provider.SendConnect(newExp);
                            switch receive {
                                case provider.AckConnect():
                                    providerFromRootDs.SendAckConnect();
                                    break;
                                case provider.NackConnect(rejected):
                                    delete rejected;
                                    providerFromRootDs.SendNackConnect(null);
                                    break;
                                case provider.ChannelClosed():
                                    //
                                    // Try to recover
                                    //
                                    providerFromRootDs.SendNackConnect(null);
                                    release = true;
                                    signal = false;
                                    recovery = true;
                                    goto exit;
                                    break;
                            }
                        }
                        else {
                            //DebugStub.Print("denied\n");
                            providerFromRootDs.SendNackConnect(ep);
                        }
                        break;
                    }
                    case providerFromRootDs.ChannelClosed():
                    {
                        goto exit;
                        break;
                    }
                    case signalFromDs.Stop():
                    {
                        release = false;
                        signal = true;
                        goto exit;
                        break;
                    }
                    case signalFromDs.ChannelClosed():
                    {
                        release = false;
                        goto exit;
                        break;
                    }
                    case provider.ChannelClosed():
                    {
                        // Recovery
                        DebugStub.Print("JP: Service provider lost. " +
                                        "Recover.\n");
                        release = true;
                        signal = false;
                        recovery = true;
                        goto exit;
                        break;
                    }
                }
            }
        exit:

            // Unlock the server
            // This allows the DirectoryServiceThread to replace the
            // ServiceProviderContract with a new one.
            providerRef.Release(provider);

            if (recovery) {
                // Notify the internal DS to restart
                signalToDs.SendStop();
                signalToDs.RecvAckStop();
            }

            // No matter whether it recovers or not, send a signal to get the
            // DirectoryServiceThread out of the loop.
            recoverySignalSenderRef.Release(signalToDs);

            if (release) {
                myProviderRef.Release(providerFromRootDs);
            }
            else {
                // Disconnect from the root DS.
                delete providerFromRootDs;
            }

            if (signal) {
                // It's over. Get the ManagerThread out of the loop.
                signalFromDs.SendAckStop();
            }
            spSignalReceiverRef.Release(signalFromDs);
        }

        /// <summary>
        /// Runs Journalets to replay their logs to the restarted and refreshed
        /// service process. Each Journalet emulates normal operation: it
        /// creates a new ServiceContract channel pair, delivers an endpoint
        /// through the ServiceProviderContract, and then interacts with the
        /// service process.
        ///
        /// Each Journalet has its own thread to replay the log, so they run
        /// concurrently.
        /// </summary>
        protected virtual void RecoveryThread()
        {
            Journalet journalet;
            ServiceContract.Exp:Start! ep;
            ServiceProviderContract.Imp:Start! provider;

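            // Per-journalet recovery handshake (sketch): the loop below hands
            // each registered journalet's fresh server endpoint to the service
            // via SendConnect; the journalet is then expected to replay its
            // recorded traffic on its own thread. The Journalet class lives
            // elsewhere in this library; the replay loop sketched here is only
            // an assumption about its shape, not its actual code:
            //
            //     foreach (LogEntry entry in this.log) {   // LogEntry is hypothetical
            //         ReplaySend(entry);                   // re-issue the logged request
            //         ReplayReceive(entry);                // consume (or check) the reply
            //     }
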
            //DebugStub.Print("JP: ENTER Recovery Thread\n");

            // Lock the server. This blocks new clients from connecting to the
            // service process.
            provider = providerRef.Acquire();
            lock (journaletListLock) {
                foreach (Object obj in journaletList) {
                    if (obj == null) {
                        break;
                    }
                    journalet = obj as Journalet;
                    if (journalet == null) {
                        continue;
                    }

                    journalet.CreateServerEndpoint(out ep);

                    provider.SendConnect(ep);
                    switch receive {
                        case provider.AckConnect():
                            break;
                        case provider.NackConnect(rejected):
                            delete rejected;
                            DebugStub.Print("JP Recovery Th: " +
                                            "Provider connection rejected\n");
                            break;
                        case provider.ChannelClosed():
                            DebugStub.Print("Server lost during recovery." +
                                            " Break.\n");
                            DebugStub.Break();
                            break;
                    }
                }
            }
            // unlock the server
            providerRef.Release(provider);

            providerThread = new Thread(new ThreadStart(ProviderThread));
            providerThread.Start();

            //DebugStub.Print("JP: EXIT Recovery Thread\n");
        }
    }
}