tsJensen

A quest for software excellence...

Distributed Parallel Processing in a Simple .NET Console Application

You write a simple console application. Debug it locally. And flip a switch to run it on all your servers and workstations running MpiVisor Server. This demo is part of the original source on GitHub. It has a small Main class, a "master" runner class, and a "spawned" runner class. More on how it works in the next post. For now, just enjoy how easy the code is. Read on to see the code.

static void Main(string[] args)
{
    //connect agent and dispose at end of execution
    //use forceLocal to run in a single process with internal visor
    using (Agent.Connect(forceLocal: false)) 
    {
        //default is File only - spawned agents shuttle logs back to master
        Log.LogType = LogType.Both; 
        if (Agent.Current.IsMaster)
        {
            try
            {
                //keep Main clean with master message loop class
                MasterRunner.Run(args);
            }
            catch (Exception e)
            {
                Log.Error("Agent master exception: {0}", e);
            }
        }
        else
        {
            try
            {
                //keep Main clean with spawned agent message loop class
                SpawnRunner.Run(args);
            }
            catch (Exception e)
            {
                Log.Error("spawn agent exception: {0}", e);
                Agent.Current.Send(new Message(Agent.Current.SessionId, 
                    Agent.Current.AgentId,
                    MpiConsts.MasterAgentId, 
                    SystemMessageTypes.Aborted, 
                    e.ToString()));
            }
        }
    }
} //standard ending - will force service to kill spawned agents  


internal static class MasterRunner
{
    private static ushort numberOfAgentsToSpawn = 2;

    //use to stop message processing loop
    private static bool continueProcessing = true;

    //additional means of determining when to stop processing loop
    private static ushort spawnedAgentsThatHaveStoppedRunning = 0;

    public static void Run(string[] args)
    {
        //spawn worker agents, send messages and orchestrate work
        Agent.Current.SpawnAgents(numberOfAgentsToSpawn, args);
        Message msg;
        do
        {
            msg = Agent.Current.ReceiveAnyMessage();
            if (msg == null) continue; //we timed out
            switch (msg.MessageType)
            {
                //handle content types > -1 which are application specific
                //case 0-~:
                    //handle messages from master or other agents here
                    //break;
                case 2:
                    //handle replies from spawned agents here
                    Log.Info("AgentId {0} sent message type 2 with {1}", 
                        msg.FromId, msg.Content);

                    //this test/demo replies with a shutdown message to the sender
                    Agent.Current.Send(new Message
                        {
                            FromId = Agent.Current.AgentId,
                            SessionId = Agent.Current.SessionId,
                            ToId = msg.FromId,
                            MessageType = SystemMessageTypes.Shutdown,
                            Content = null
                        });
                    break;

                //handle internal messages and unknown
                case SystemMessageTypes.Started:
                    Log.Info("AgentId {0} reports being started.", msg.FromId);
                    //send demo/test content message
                    Agent.Current.Send(new Message
                    {
                        FromId = Agent.Current.AgentId,
                        SessionId = Agent.Current.SessionId,
                        ToId = msg.FromId,
                        MessageType = 1,  //custom app type
                        Content = "hello from 1"
                    });
                    break;
                case SystemMessageTypes.Stopped:
                    Log.Info("AgentId {0} reports being stopped.", msg.FromId);
                    spawnedAgentsThatHaveStoppedRunning++;
                    break;
                case SystemMessageTypes.Aborted:
                    Log.Info("AgentId {0} reports being aborted.", msg.FromId);
                    spawnedAgentsThatHaveStoppedRunning++;
                    break;
                case SystemMessageTypes.Error:
                    Log.Info("AgentId {0} reports an error.", msg.FromId);
                    break;
                default:
                    Log.Info("AgentId {0} sent {1} with {2}", 
                        msg.FromId, msg.MessageType, msg.Content);
                    break;
            }
        }
        while (continueProcessing 
            && spawnedAgentsThatHaveStoppedRunning < numberOfAgentsToSpawn);
        
        //change while logic as desired to keep master running 
        //or shut it down and all other agents will as well
        Log.Info("done master");
    }
}


internal static class SpawnRunner
{
    private static bool continueProcessing = true;

    public static void Run(string[] args)
    {
        Message msg;
        do
        {
            msg = Agent.Current.ReceiveAnyMessage();
            if (msg == null) continue; //we timed out
            switch (msg.MessageType)
            {
                //handle content types > -1 which are application specific
                case 1:
                    //handle messages from master or other agents here
                    Log.Info("AgentId {0} sent message type 1 with {1}", 
                        msg.FromId, msg.Content);

                    //this test/demo just sends the message back to the sender
                    Agent.Current.Send(new Message
                        {
                            FromId = Agent.Current.AgentId,
                            SessionId = Agent.Current.SessionId,
                            ToId = msg.FromId,
                            MessageType = 2,
                            Content = msg.Content.ToString() + " received"
                        });
                    break;

                //handle internal messages and unknown
                case SystemMessageTypes.Shutdown:
                    Log.Info("AgentId {0} sent shut down message", msg.FromId);
                    continueProcessing = false;
                    break;
                default:
                    Log.Info("AgentId {0} sent {1} with {2}", 
                        msg.FromId, msg.MessageType, msg.Content);
                    break;
            }
        }
        while (continueProcessing);
        Log.Info("work done");
    }
}

Distributed Parallel Computing for .NET with DuoVia.MpiVisor

I have not posted on my blog for some time. I’ve been busy. Work. Family. And DuoVia.Net. DuoVia Inc. has been my little corp-to-corp contracting vehicle, which largely lies dormant while I’m employed in any other arrangement. I have no plans to change my current employment arrangement, but I am transforming DuoVia into an open source services and tools organization for .NET.

It has taken a large number of weekends and evenings to pull it off, but I am now very excited to present DuoVia’s first three open source projects, new web site, and new vision. The site is nothing really to brag about. It is simple and clean. The vision is evolving. But the three very cool projects are something else.

DuoVia.MpiVisor
Don’t let anybody lie to you. Doing distributed parallel computing (DPC) on HPC or Hadoop or MPI can be very hard. Very complex. And for advanced DPC systems that will not likely change.

But let’s face it. Many of the chores we need a distributed computing system for are small, down-and-dirty, one-off projects where something hard needs to be done once or perhaps occasionally. But the problem just doesn’t merit the hard work or the budget required to use an enterprise-class system. So, as often as not, the project just doesn’t get done, or you solve the problem in a far less efficient and satisfying way.

That’s where DuoVia.MpiVisor comes in. Read more on the new web site www.duovia.net.

You can get the “agent” library as a NuGet package. This will give you everything you need to write and debug your DPC application in Visual Studio without having to deploy it to your distributed processing servers or workstations. Once you’re ready to rock and roll, just get the code for the server at GitHub and run it on as many machines as you need.

DuoVia.Net
Did you ever need to just write a simple interface, a couple of DTOs, and a simple implementation of a service, stand it up, and use it without any configuration fuss? Don’t want to bother configuring WCF or spinning up a RESTful interface just to have two .NET apps exchange some data fast and easy? Look no further. Get the source code at GitHub or download the NuGet package today.
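
To give a feel for the shape of it, here is a minimal sketch of “a simple interface, a couple of DTOs and a simple implementation.” The types below are hypothetical examples of mine, not part of the library; the actual hosting and client proxy calls are shown in the demo code on GitHub.

//hypothetical example types, not part of DuoVia.Net itself
public class CustomerDto
{
    public int Id { get; set; }
    public string Name { get; set; }
}

public interface ICustomerService
{
    CustomerDto GetCustomer(int id);
    void SaveCustomer(CustomerDto customer);
}

public class CustomerService : ICustomerService
{
    public CustomerDto GetCustomer(int id)
    {
        //fake lookup, for illustration only
        return new CustomerDto { Id = id, Name = "Customer " + id };
    }

    public void SaveCustomer(CustomerDto customer)
    {
        //persist the DTO here
    }
}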

DuoVia.FuzzyStrings
Do you need to know if “Jensn” is probably equal to “Jensen”? Then the DuoVia.FuzzyStrings library is the perfect choice. You can download the source at GitHub or get the NuGet package.
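
A quick sketch of the kind of call this enables, assuming the FuzzyEquals and FuzzyMatch string extension methods exposed by the library (check the test project on GitHub for the exact method names and namespace):

using DuoVia.FuzzyStrings; //assumed namespace for the string extensions

//true when the two strings are "probably equal"
bool probablyEqual = "Jensn".FuzzyEquals("Jensen");

//a score between 0.0 and 1.0 indicating how closely the strings match
double score = "Jensn".FuzzyMatch("Jensen");

Console.WriteLine("FuzzyEquals: {0}  FuzzyMatch: {1}", probablyEqual, score);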

Demo and Test Code on GitHub
If you want to learn how to use these libraries before I have time to fully document them and publish that on the site, please do go to GitHub and download the source. In each project you will find the source for a demo or test which shows just how easy these libraries are to use.

Apologies for the sales pitch, but I really do want you to visit the new web site and check it all out.

Of course I will continue to blog here about various and random .NET development topics. But I invite you to check out the new web site and more especially to give the new projects a try. You may find them useful and when you do, I hope to hear from you about your triumphs.

SQLite on Visual Studio with NuGet and Easy Instructions

My most popular blog post ever was entitled SQLite on Visual Studio 2010 Setup Instructions, which I wrote nearly 18 months ago. A lot has changed since then, but the page is still as popular as ever. Here is the replacement for that antiquated post.

I’m now using Visual Studio 2012 at home. I’m still using Visual Studio 2010 at work. I have tried the latest x86/x64 SQLite NuGet package on both and it works very well. So forget about my post from 18 months ago and read on.

If you don’t have the NuGet Package Manager installed, go get it and get it installed.

For testing purposes, create a simple console application. If you’re like me, you’ll change the project platform target so that it will build for Any CPU. And if you’re using VS 2012, you’ll uncheck the “Prefer 32-bit” checkbox so that your console app will run as an x64 app in your environment. Unless, of course, you are running an x86 OS, in which case you will want to stick with that. (As an aside, if you are still running in 32-bit mode, I recommend you upgrade.)

Now comes the fun part. Illustrated and very easy instructions. Don’t worry, we’ll get to some code later on.

First, right click on your project in the Solution Explorer and select Manage NuGet Packages…

[Screenshot: sqlite_nuget_01]

In the Package Manager dialog, click the “Online” tab on the left and then search for “sqlite”. You’ll see something like this:

[Screenshot: sqlite_nuget_02]

Select the (x86/x64) package for maximum flexibility. You will end up with two folders in your project with the appropriate interop assemblies in each. Those files are marked as “Content” and “Copy always” in your project. In this way, you no longer have to worry about which platform your application is running on. Here’s what it looks like:

[Screenshot: sqlite_nuget_03]

Save and build. You are now ready for some real code. Yes. It is that easy. So much easier than 18 months ago.

Now here’s the test code. This code shows you two approaches in Test1() and Test2() methods. Test1 uses the SQLiteFactory to create a standard System.Data.Common.DbConnection object so that you can plug in your standard ADO.NET code. Test2 uses the System.Data.SQLite namespace and its SQLiteConnection, SQLiteCommand, SQLiteParameter and SQLiteDataReader classes.

using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace SQLiteExperiment
{
  class Program
  {
    static void Main(string[] args)
    {
      var dbFile = @"D:\SQLData\SQLite\test.db";
      if (File.Exists(dbFile)) File.Delete(dbFile);

      var connString = string.Format(@"Data Source={0}; Pooling=false; FailIfMissing=false;", dbFile);

      //test using System.Data.Common and SQLiteFactory
      Test1(connString);

      if (File.Exists(dbFile))
        Console.WriteLine("Test1 succeeds");
      else
        Console.WriteLine("Test1 fails.");

      //prepare for test 2 using SQLite
      //Note: If Pooling=true in connection string the 
      //      File.Delete will fail with IOException - The process cannot
      //      access the file because it is being used by another process.
      if (File.Exists(dbFile)) File.Delete(dbFile);

      
      Console.WriteLine(string.Empty);

      //test using System.Data.SQLite namespace including SQLiteConnection
      Test2(connString);

      if (File.Exists(dbFile))
        Console.WriteLine("Test2 succeeds");
      else
        Console.WriteLine("Test2 fails.");

      Console.ReadLine(); //prevent close while in debug
    }

    static void Test1(string connString)
    {
      Console.WriteLine("Begin Test1");
      using (var factory = new System.Data.SQLite.SQLiteFactory())
      using (System.Data.Common.DbConnection dbConn = factory.CreateConnection())
      {
        dbConn.ConnectionString = connString;
        dbConn.Open();
        using (System.Data.Common.DbCommand cmd = dbConn.CreateCommand())
        {
          //create table
          cmd.CommandText = @"CREATE TABLE IF NOT EXISTS T1 (ID integer primary key, T text);";
          cmd.ExecuteNonQuery();

          //parameterized insert
          cmd.CommandText = @"INSERT INTO T1 (ID,T) VALUES(@id,@t)";
          
          var p1 = cmd.CreateParameter();
          p1.ParameterName = "@id";
          p1.Value = 1;

          var p2 = cmd.CreateParameter();
          p2.ParameterName = "@t";
          p2.Value = "test1";

          cmd.Parameters.Add(p1);
          cmd.Parameters.Add(p2);

          cmd.ExecuteNonQuery();

          //read from the table
          cmd.CommandText = @"SELECT ID, T FROM T1";
          using (System.Data.Common.DbDataReader reader = cmd.ExecuteReader())
          {
            while (reader.Read())
            {
              long id = reader.GetInt64(0);
              string t = reader.GetString(1);
              Console.WriteLine("record read as id: {0} t: {1}", id, t);
            }
          }
          cmd.Dispose();
        }
        if (dbConn.State != System.Data.ConnectionState.Closed) dbConn.Close();
        dbConn.Dispose();
        factory.Dispose();
      }
      Console.WriteLine("End Test1");
    }

    private static void Test2(string connString)
    {
      Console.WriteLine("Begin Test2");

      using (var dbConn = new System.Data.SQLite.SQLiteConnection(connString))
      {
        dbConn.Open();
        using (System.Data.SQLite.SQLiteCommand cmd = dbConn.CreateCommand())
        {
          //create table
          cmd.CommandText = @"CREATE TABLE IF NOT EXISTS T1 (ID integer primary key, T text);";
          cmd.ExecuteNonQuery();

          //parameterized insert - more flexibility on parameter creation
          cmd.CommandText = @"INSERT INTO T1 (ID,T) VALUES(@id,@t)";

          cmd.Parameters.Add(new System.Data.SQLite.SQLiteParameter 
            { 
              ParameterName = "@id", 
              Value = 1 
            });

          cmd.Parameters.Add(new System.Data.SQLite.SQLiteParameter
          {
            ParameterName = "@t",
            Value = "test2"
          });

          cmd.ExecuteNonQuery();

          //read from the table
          cmd.CommandText = @"SELECT ID, T FROM T1";
          using (System.Data.SQLite.SQLiteDataReader reader = cmd.ExecuteReader())
          {
            while (reader.Read())
            {
              long id = reader.GetInt64(0);
              string t = reader.GetString(1);
              Console.WriteLine("record read as id: {0} t: {1}", id, t);
            }
          }
        }
        if (dbConn.State != System.Data.ConnectionState.Closed) dbConn.Close();
      }
      Console.WriteLine("End Test2");
    }
  }
}

This code is ultra simplistic but illustrates exactly how easy it is to use SQLite after “installing” the NuGet package. I have not tried to use a visual designer with this package. I’ll leave that discussion to another day.

DomainAspects: Aspect Oriented Programming with Castle Windsor Interceptor

Last week I released DomainAspects 2.0 without many details other than its general features. In this post and future posts, I’m going to delve into the details one area or feature at a time. My purpose is to help you understand how each piece works and provide examples of how you might use the library or the code in your own projects.

The few aspect oriented programming (AOP) languages and frameworks that I have looked at seem to me to be unwieldy and unnecessarily intrusive in the daily workflow of an application developer, cluttering up the code and creating cumbersome dependencies on the AOP construct itself. Fortunately there is a cleaner alternative to AOP languages found in the inversion of control (IoC) containers we so commonly use for dependency injection in order to enjoy the benefits of SOLID design: the Interceptor.

Interception is supported by many of the IoC libraries. I’ve used Unity a little but most of my experience is with Castle Windsor, so of course, I prefer Windsor. I also prefer Windsor’s interception approach because I find it easier. If you are a Unity user, take a look at Dino Esposito’s Unity Interception article on MSDN. You can see for yourself that the Unity IInterceptionBehavior implementation is relatively more complex than Windsor’s IInterceptor:

public interface IInterceptor
{
   void Intercept(IInvocation invocation);
}

In DomainAspects, I wanted my Interceptor to handle logging and exception handling, which I like to call “auditing”, probably the most common of the cross cutting concerns handled by AOP. I also wanted the Interceptor to handle authorization security. But I wanted to have a simple Interceptor class that would have its “auditor” and “authorization” dependencies injected by the same container. Each dependency would handle its specific responsibilities.

Here’s the Windsor fluent configuration code for each of these. Note the first component is the IPrincipalProvider which is a dependency of the IOperationAuthorizer implementation. I then register the components for the authorizer and the auditor. The last component configured by the DomainAspects ServiceFactory class is the OperationInterceptor.

_container.Register(
   Component.For<IPrincipalProvider>()
      .ImplementedBy(_principalProviderType)
      .Named("PrincipalProvider")
      .LifeStyle.Transient,

   Component.For<IOperationAuthorizer>()
   .ImplementedBy(_operationAuthorizerType)
   .Named("OperationAuthorizer")
   .LifeStyle.Transient,

   Component.For<IOperationAuditor>()
   .ImplementedBy(_operationAuditorType)
   .Named("OperationAuditor")
   .LifeStyle.Transient,

   Component.For<OperationInterceptor>()
   .Named("OperationInterceptor")
   .LifeStyle.Transient);

When the DomainAspects ServiceFactory’s Create method is called, if the service has not already been registered with the Windsor container, it will register the component and wrap it with the Interceptor we have just configured using Windsor’s fluent interface. Here’s the code:

_container.Register(
   Component.For(interfaceType)
   .ImplementedBy(implementedByType)
   .Named(implementedByType.Name)
   .Interceptors(InterceptorReference.ForKey("OperationInterceptor")).Anywhere
   .LifeStyle.PerThread);

Note that the .Interceptors method takes a params array of InterceptorReference objects. So like Unity, you can chain interception. Unlike Unity, the chaining occurs automatically for you in the sequence of the array. Your IInterceptor implementation does not require you to be concerned with wiring up the chain as does the Unity IInterceptionBehavior.
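
For example, chaining a second interceptor is just a matter of passing more than one reference. A minimal sketch, assuming a hypothetical second component registered under the key "CachingInterceptor":

_container.Register(
   Component.For(interfaceType)
   .ImplementedBy(implementedByType)
   .Named(implementedByType.Name)
   //interceptors run in array order: auditing/authorization first, then caching
   .Interceptors(
      InterceptorReference.ForKey("OperationInterceptor"),
      InterceptorReference.ForKey("CachingInterceptor")).Anywhere
   .LifeStyle.PerThread);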

Now take a look at the relatively simple, application-requirement-focused OperationInterceptor class, which implements the Windsor IInterceptor interface with a single method and depends on the container to inject its two dependencies. The code in the interceptor class here just handles the business case of examining the class and method being intercepted for attributes that will require action by one or both of its dependencies.

public class OperationInterceptor : IInterceptor   
{
   private IOperationAuditor auditor;
   private IOperationAuthorizer authorizer;

   public OperationInterceptor(IOperationAuditor opAuditor, IOperationAuthorizer opAuthorizer)
   {
      auditor = opAuditor;
      authorizer = opAuthorizer;
   }

   public void Intercept(IInvocation invocation)
   {
      AutoLogAttribute autoLogAttribute = null;
      AuthorizeAttribute authorizeAttribute = null;
      var methodInfo = invocation.MethodInvocationTarget;
      object invokedKey = null;
      try
      {
         //find AutoLogAttribute
         autoLogAttribute = ((AutoLogAttribute[])methodInfo
            .GetCustomAttributes(typeof(AutoLogAttribute), true))
            .FirstOrDefault();

         if (null == autoLogAttribute) //try class level
            autoLogAttribute = ((AutoLogAttribute[])methodInfo.DeclaringType
            .GetCustomAttributes(typeof(AutoLogAttribute), true))
            .FirstOrDefault();

         if (null != autoLogAttribute &&  null != auditor)
         {
            //log invocation
            auditor.LogOperationInvoked(methodInfo, 
               invocation.Arguments, 
               invocation.GenericArguments, 
               out invokedKey);
         }

         //find AuthorizeAttribute
         authorizeAttribute = ((AuthorizeAttribute[])methodInfo
            .GetCustomAttributes(typeof(AuthorizeAttribute), true))
            .FirstOrDefault();

         if (null == authorizeAttribute) //try class level
            authorizeAttribute = ((AuthorizeAttribute[])methodInfo.DeclaringType
            .GetCustomAttributes(typeof(AuthorizeAttribute), true))
            .FirstOrDefault();

         if (null != authorizeAttribute && null != authorizer)
         {
            //authorize
            authorizer.Authorize(methodInfo, invocation.Arguments, invocation.GenericArguments);
         }

         //proceed with invocation
         invocation.Proceed();
      }
      catch(Exception e)
      {
         //log exception and throw
         if (null != autoLogAttribute && null != auditor)
         {
            auditor.LogOperationException(invokedKey, methodInfo, e);
         }
         throw; //always throw to bubble up exception and preserve call stack
      }
   }
}

If you are considering aspect oriented programming or you need interception for another reason, I hope this post is helpful. If you are looking through the DomainAspects 2.0 library code, I hope this will help you understand how and why I’ve used Windsor’s IoC container and their interception facility.

DomainAspects 2.0: Aspect Oriented Library with Generic WCF Host

Would you like to just focus on your business domain and host it anywhere easily rather than fussing with infrastructure? With DomainAspects you just write the interface in a project called [MyDomain].Common and the implementation in a project called [MyDomain]. Now add a brief and easy configuration string to both client and service host and xcopy deploy. Restart your hosting service if you are using the domain remotely.

With DomainAspects, all access to the implementation is controlled as you have specified through simple attributes on your implementation, without ever writing specific code to validate and authorize. Each and every call is logged without adding any logging code. Each and every exception is captured, logged, and thrown up the chain to the client without your ever adding a try/catch block.
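
To make that concrete, here is a minimal sketch of what a domain service might look like, using the AutoLog and Authorize attributes that the library's OperationInterceptor looks for. The interface, the implementation, and the bare attribute usage are hypothetical; follow the TestDomain and TestDomain.Common projects in the download for the real conventions.

//in [MyDomain].Common - the interface your clients reference
public interface ICustomerDomain
{
   string GetCustomerName(int id);
}

//in [MyDomain] - the implementation the service factory creates and wraps
[AutoLog]     //every call and every exception gets logged by the interceptor
[Authorize]   //every call gets checked by the configured IOperationAuthorizer
public class CustomerDomain : ICustomerDomain
{
   public string GetCustomerName(int id)
   {
      //just business logic here - no logging, no try/catch, no security checks
      return "Customer " + id;
   }
}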

Introducing the 2.0 version of DomainAspects. This new version contains some major changes, chief among them the introduction of a WCF host container that allows you to access your aspect oriented domain layer remotely. More on the changes and features later, but first, let’s review what has come before and contributed to this new release:

And some earlier posts that informed the creation of the WCF host in this new version:

DomainAspects 2.0 Introduced Today

Over the last couple of months, I’ve spent considerable time on the weekends to complete the idea of merging my previous work on a generic WCF host into my DomainAspects project. Since I had not touched either of these projects for some time, it was fun and interesting to look at them from a new perspective. There are many changes for the better here. Please download it and give it a try. I am eager to hear your feedback.

New in DomainAspects 2.0

Today, I’m releasing DomainAspects 2.0 which supports the following new features:

  • A WCF host service for accessing domain services running in another process or server with the following features/benefits:
    • Fast and encrypted net.tcp binding with Windows permissions.
    • Simplified custom configuration section that is the same on client and server.
    • Custom instance provider uses the DomainAspects service factory and Windsor for instance creation and release.
    • Ability to specify service instance lifestyle closely matching Windsor (PerThread, Pooled, Singleton, SingleCall).
    • Service host that will run in console mode for easy debugging.
    • Flip a config switch and install the service host as a Windows service. No recompile required.
    • RemoteClient implements IDisposable correctly, dealing with Abort vs Close for you.
    • Host can be configured to use a fake PrincipalProvider for easily testing a variety of user roles and combinations.
    • Reliance on DomainAspects for authorization rather than configured local Windows groups.
  • A light remote client assembly allows access to DomainAspects with minimal deployment requirements.
  • Local client and remote client both return an instance of the service interface, allowing a domain to be moved to another process or remote server with minimal changes to client code.
  • Upgrade to the current stable release Castle Windsor 3.0 container libraries.
  • Improved domain service factory allows greater instance creation control.
  • Additional tests, including multi-threaded remote tests focusing on instance creation and pooling.
  • More intuitive, self-describing classes to make understanding the code easier.

Using DomainAspects 2.0

Using the DomainAspects solution is easy. You must have Visual Studio 2010 Pro or higher. If you do not, you may spend considerable time trying to get it running. I would not recommend it.

Download the code and make sure you right-click the zip file, go to Properties, and unblock the file before you extract it.

To run the remote tests, be sure to start DomainAspects.Host without debugging first. I recommend running the MultiRemoteTests class using a multiple-project startup, with the host running first and DomainAspects.TestConsole running the tests, rather than using the automated test runner, since I have not found a way to get those tests to run properly within the test harness.

Now run the tests. Watch the host console as the MultiRemoteTests run. Note that instance creation on the second and third runs takes zero time because those instances have been pooled.

When you are ready to build your own domain service, just follow the examples in the TestDomain and TestDomain.Common projects. Configure the host and the client as shown in the app.config files in the included projects appropriately to match your new domain and domain.Common assemblies.

If you are going remote, build and deploy the DomainAspects.Host. Make sure you switch the “consoleMode” attribute in the config on the server side to “Off” so that it will install and run as a service.

Download DomainAspects20.zip (2.10 mb)

Facebook Sucks Away Too Much Time

I have removed the Facebook mobile apps from my mobile devices. It was really just occupying too much of my time. I am now considering removing my account entirely. I am finding less and less value in the increasing amount of time I spend on the site. What about you? Have you abandoned Facebook too? Let me know why and how leaving Facebook has affected your life.

Choosing a Content Management System

Growth in the content management system (CMS) market is strong as more and more companies look for software to help them manage their burgeoning coffers of content. This growth is partly fed by the growing temptation to use these systems for purposes far beyond managing content, with CMS vendors tacking on every new feature they can dream up to feed this frenzy. The result is often a bloated, unwieldy, unmanageable, complexity-induced, bug-infested mess.

I have a relatively high level of interest in this space given my brief professional experience in this market. So I read with interest a post today on the subject by one of my favorite software development authors, Martin Fowler. In his post entitled Editing Publishing Separation, Fowler discusses the advantages of separating the editing data and experience from the publishing experience and published data. I invite you to read his article as he makes a very cogent argument which should be taken into account as you consider choosing a CMS.

Before he makes this point, he sets up his theme with this preamble:

"...a regular theme has been the growing impact of content management systems (CMS). They aren't usually seen as helpful, indeed there is a clear sign that they are becoming a worryingly invasive tool - used for more than their core purpose in such a manner that they hinder overall development."

It is precisely this thought which accurately represents my own limited experience in the CMS market. So here are my recommendations for consideration when choosing a CMS for your content management purposes.

  • As Martin suggests, look for a separate editing and publishing experience including separation of data and the advantages that can come from that.
  • And pay close attention to Martin’s opening quote above. When looking at a feature, such as e-Commerce, consider whether you should be looking at a full-featured e-Commerce platform rather than a feature that may have been cobbled onto the product as an afterthought. Remember that your primary purpose should be the management of content.
  • Whether you choose open or closed, open source or commercial, be sure that your own development team can understand and use the system with alacrity.
  • If you have a PHP team, don’t buy a .NET based CMS. And vice versa.
  • Take your time in evaluation. A quick, under-informed decision can cost you far more than you can imagine in future headaches.
  • Give your development team time to find the real problems with the CMS being evaluated and determine whether and how they can live with the problems they encounter.
  • Be sure to test the support and services teams of the vendor if you are going commercial over open source. Or if you are going open source, look for a proven and reliable partner who can guide you through the rough patches. Because no matter which CMS you choose, there will be some of those.
  • Consider licensing and support costs but don’t fall into the trap that open source is free. The efficiency of your own development and content management team using the CMS over the long haul will have a far larger impact on your overall cost of ownership.
  • Ask someone from Missouri to join your evaluation team. In other words, NEVER just accept the word of a sales representative that the CMS she is selling has the feature that you want and that it works as advertised. Like all software, there is a HUGE amount of advertising fluff and endless feature lists. Make sure that you have the sales team “show” you and then make sure that your own development team can make it work without their help and without having to contact support before you accept the claim, “Oh yes, our CMS does that.”
  • And last but not least, do your own research of satisfied and dissatisfied customers. Make some calls. Ask to speak to someone on the evaluation team where another product was chosen over the one you are considering. Ask if they evaluated the product you have your sights set on. Chances are that a few phone calls will land you a gold mine in helpful information.

Of course, you can apply these same principles to purchasing any enterprise software. And you should.

Learning from the Elevator State Machine Programming Test

In a recent job interview I was asked to build an elevator simulator. I was given the weekend to do it. I had never written a state machine before. It wasn't perfect, but it passed all the tests I could come up with based on my own assumptions about the rules of behavior, which I drew from the following text from the interviewer:

Build an elevator simulator. It should have a UI that shows 5 floors, with the call buttons for each floor. The UI should also show the panel of buttons inside the elevator. As a user, I can press any floor call button (up or down), as well as elevator panel buttons to move the elevator. The elevator should move at a realistic speed (not just go instantaneously to the desired floor).

Evaluation criteria:

  1. Accuracy of the elevator state machine with various button-press scenarios.
  2. The object design of your project – how you organize the design into classes and their relationships, what design patterns you choose to use.
  3. Coding style and clarity.
  4. The UI doesn’t need to be fancy, but it needs to enable the functional requirements stated above. You can use any UI toolkit of your choice.
  5. The code should be written in C#.

Unfortunately, my assumptions were not the same as the evaluation team's assumptions, so they found some "bugs" in the behavior of my elevator. Honestly, I was not surprised. In fact, when I submitted the code, I had offered the following observations:

I did not spend much time refactoring or documenting, so you're getting to see the "first draft" of the code.

Some refactoring that I would do:

1. Create tighter assertions in the tests, requiring a specific sequence of logged events rather than manual review. In running the tests, I took a shortcut and debugged through the tests to review the log entries from the state machine to verify the sequence of actions. This should be corrected.

2. Clean up each of the two state machines, AtFloor and Moving. To simplify, I might create one or two more state machine classes to make the logic rules easier to follow and remove some of the "if" fall-through logic states. (A sketch of the general state machine pattern appears after this list.)

3. Tighten up the UI with a view data model to reduce the clutter in the code, or perhaps even rewrite the UI in WPF with an MVVM architecture. My choice of Windows Forms was based on time limitations and my limited experience with WPF.

4. Review and tighten up naming and coding conventions for improved readability.
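
For readers who, like me, had not written a state machine before this exercise, here is a minimal sketch of the general pattern and nothing more: a state enum, a set of requested floors, and a Step method called on a timer tick to move the elevator one floor at a time. This is not the code in the download, which splits the logic across the AtFloor and Moving state machines and drives a Windows Forms UI.

using System.Collections.Generic;

public enum ElevatorState { Idle, MovingUp, MovingDown, DoorsOpen }

public class SimpleElevator
{
    private const int TopFloor = 5;
    private readonly SortedSet<int> requestedFloors = new SortedSet<int>();

    public ElevatorState State { get; private set; }
    public int CurrentFloor { get; private set; }

    public SimpleElevator()
    {
        State = ElevatorState.Idle;
        CurrentFloor = 1;
    }

    //called by the floor call buttons and the in-elevator panel buttons
    public void RequestFloor(int floor)
    {
        if (floor >= 1 && floor <= TopFloor && floor != CurrentFloor)
            requestedFloors.Add(floor);
    }

    //called on a timer tick to simulate realistic speed, one floor per tick
    public void Step()
    {
        switch (State)
        {
            case ElevatorState.Idle:
                //naive direction choice: serve the lowest request first;
                //a real controller would pick a smarter direction
                if (requestedFloors.Count > 0)
                    State = requestedFloors.Min > CurrentFloor
                        ? ElevatorState.MovingUp
                        : ElevatorState.MovingDown;
                break;
            case ElevatorState.MovingUp:
                CurrentFloor++;
                if (requestedFloors.Remove(CurrentFloor))
                    State = ElevatorState.DoorsOpen;
                break;
            case ElevatorState.MovingDown:
                CurrentFloor--;
                if (requestedFloors.Remove(CurrentFloor))
                    State = ElevatorState.DoorsOpen;
                break;
            case ElevatorState.DoorsOpen:
                //doors close on the next tick; go idle and re-evaluate requests
                State = ElevatorState.Idle;
                break;
        }
    }
}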

The "bug" in the elevator was not the reason I did not get the job. Fortunately, the recruiter sent me the feedback she received from the interview team. This is rare. I found it helpful and will certainly take it into account in the future.

The positive and exciting thing about this entire experience was having a reason to write my first state machine in a fun exercise which I want to share with you here. This was certainly the most comprehensive programming test I've ever been given in conjunction with an interview. I applaud the employer for having such a thorough and extensive evaluation process. And I thank the interview team for their feedback. Such thoroughness really helps to determine whether a person will be a good fit for the team. And above all else, that fit is of paramount importance.

Please download the code and let me know what you think. There are clearly flaws, as I would expect of any code thrown together over a weekend, especially where the particular pattern being implemented is a first attempt. When I have time, I may want to pull this off the shelf and do the refactoring I suggested when I submitted the code. But first, I need to find an employer and settle down. I think that will happen soon.

ElevatorControl.zip (29.94 kb)