[Mulgara-svn] r1152 - in trunk/src/jar: . store-stringpool-xa11 store-stringpool-xa11/java store-stringpool-xa11/java/org store-stringpool-xa11/java/org/mulgara store-stringpool-xa11/java/org/mulgara/store store-stringpool-xa11/java/org/mulgara/store/stringpool store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11

pag at mulgara.org
Sat Aug 23 00:43:12 UTC 2008


Author: pag
Date: 2008-08-22 17:43:11 -0700 (Fri, 22 Aug 2008)
New Revision: 1152

Added:
   trunk/src/jar/store-stringpool-xa11/
   trunk/src/jar/store-stringpool-xa11/build.xml
   trunk/src/jar/store-stringpool-xa11/java/
   trunk/src/jar/store-stringpool-xa11/java/org/
   trunk/src/jar/store-stringpool-xa11/java/org/mulgara/
   trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/
   trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/
   trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/
   trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/BlankNodeAllocator.java
   trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataAVLComparator.java
   trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataCategoryAVLComparator.java
   trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataCategoryTypeAVLComparator.java
   trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataStruct.java
   trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolFactory.java
   trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolImpl.java
   trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolImplTest.java
   trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolLoadTest.java
   trunk/src/jar/store-stringpool-xa11/store-stringpool-xa11-build.properties
Log:
A new take on the string pool. This version uses an integrated node pool (rather than a free list) and is designed around write-once-read-many access, instead of only behaving that way incidentally. So far it operates marginally faster, though the real test will be loading very large data sets.
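
A minimal sketch (not part of the commit; the class and method names here are illustrative) of the write-once allocation idea the new code implements: gNode values are derived from the position of each record in an append-only flat data file, so allocating a node and writing its data are the same step, and records are padded to 8-byte boundaries. See DataStruct.toOffset()/round() and XA11StringPoolImpl.put() in the diffs below for the real implementation.

    // Illustrative sketch only, not the committed code.
    class AppendOnlyAllocationSketch {
      private long nextGNode = 1;                    // stand-in for NodePool.MIN_NODE

      long store(byte[] record) {
        long gNode = nextGNode;                      // the node id is derived from the file position
        nextGNode += roundUpToLong(record.length);   // the next id advances by the padded record size
        return gNode;
      }

      static long roundUpToLong(long size) {
        return (size + 7) & ~7L;                     // keep every record 8-byte aligned
      }
    }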

Added: trunk/src/jar/store-stringpool-xa11/build.xml
===================================================================
--- trunk/src/jar/store-stringpool-xa11/build.xml	                        (rev 0)
+++ trunk/src/jar/store-stringpool-xa11/build.xml	2008-08-23 00:43:11 UTC (rev 1152)
@@ -0,0 +1,153 @@
+<?xml version="1.0" encoding="ISO-8859-1"?>
+<!DOCTYPE project>
+
+<!-- =================================================================== -->
+<!--                       Project definition                            -->
+<!-- =================================================================== -->
+<project name="store-stringpool-xa11" default="store-stringpool-xa11-jar" basedir="../../..">
+
+  <!-- =================================================================== -->
+  <!--                      Property Definitions                           -->
+  <!-- =================================================================== -->
+
+  <!-- =================================================================== -->
+  <!--                              Imports                                -->
+  <!-- =================================================================== -->
+
+  <!-- =================================================================== -->
+  <!--                          Path Definitions                           -->
+  <!-- =================================================================== -->
+  <path id="store-stringpool-xa11-classpath">
+
+    <path refid="common-classpath"/>
+
+    <fileset file="${query.dist.dir}/${query.jar}"/>
+    <fileset file="${resolver-spi.dist.dir}/${resolver-spi.jar}"/>
+    <fileset file="${tuples.dist.dir}/${tuples.jar}"/>
+    <fileset file="${util.dist.dir}/${util.jar}"/>
+    <fileset file="${util-xa.dist.dir}/${util-xa.jar}"/>
+  </path>
+
+  <path id="store-stringpool-xa11-test-classpath">
+
+    <path refid="store-stringpool-xa11-classpath"/>
+
+    <fileset file="${store-stringpool-xa11.dist.dir}/${store-stringpool-xa11.jar}"/>
+  </path>
+
+  <target name="store-stringpool-xa11-clean"
+          description="Removes all compile generated files for the store-stringpool-xa11">
+
+    <tstamp/>
+
+    <delete dir="${store-stringpool-xa11.obj.dir}"/>
+    <delete dir="${store-stringpool-xa11.test.dir}"/>
+    <delete dir="${store-stringpool-xa11.dist.dir}"/>
+  </target>
+
+  <target name="-store-stringpool-xa11-prepare"
+          description="Creates all directories associated with the
+                       store-stringpool-xa11's compilation"
+          depends="-prepare-build">
+
+    <mkdir dir="${store-stringpool-xa11.obj.dir}"/>
+    <mkdir dir="${store-stringpool-xa11.test.dir}"/>
+    <mkdir dir="${store-stringpool-xa11.dist.dir}"/>
+    <mkdir dir="${store-stringpool-xa11.obj.dir}/classes"/>
+  </target>
+
+  <target name="store-stringpool-xa11-compile"
+          depends="-store-stringpool-xa11-prepare, 
+                   resolver-spi-jar, util-xa-jar"
+          description="Compiles all store-stringpool-xa11 related files included
+                       generated source code">
+
+    <javac destdir="${store-stringpool-xa11.obj.dir}/classes" debug="on"
+           deprecation="on" source="1.5">
+
+      <classpath refid="store-stringpool-xa11-classpath"/>
+
+      <src path="${store-stringpool-xa11.src.dir}/java"/>
+    </javac>
+  </target>
+
+  <target name="store-stringpool-xa11-jar"
+          depends="store-stringpool-xa11-compile, -store-stringpool-xa11-jar-uptodate"
+          unless="store-stringpool-xa11.jar.uptodate"
+          description="Builds the memory-backed node pool JAR">
+
+    <jar jarfile="${store-stringpool-xa11.dist.dir}/${store-stringpool-xa11.jar}">
+      <fileset dir="${store-stringpool-xa11.obj.dir}/classes"/>
+    </jar>
+  </target>
+
+  <target name="-store-stringpool-xa11-jar-uptodate">
+
+    <uptodate property="store-stringpool-xa11.jar.uptodate"
+              targetfile="${store-stringpool-xa11.dist.dir}/${store-stringpool-xa11.jar}">
+      <srcfiles dir="${store-stringpool-xa11.obj.dir}/classes" includes="**/*"/>
+    </uptodate>
+  </target>
+
+  <target name="store-stringpool-xa11-dist"
+          depends="store-stringpool-xa11-jar"
+          description="Performs all tasks related to finalising this
+                       store-stringpool-xa11 and readying it for distribution">
+
+    <!-- All that's involved in the final version of the store-stringpool-xa11 library
+         is the jar, so we don't need to do anything here; the dependencies
+         take care of the work. -->
+  </target>
+
+  <target name="store-stringpool-xa11-test"
+          depends="store-stringpool-xa11-jar">
+
+    <antcall target="component-test">
+
+      <param name="classpath.id" value="store-stringpool-xa11-test-classpath"/>
+      <param name="dir" value="${store-stringpool-xa11.src.dir}"/>
+      <param name="jar" value="${store-stringpool-xa11.jar}"/>
+    </antcall>
+  </target>
+
+  <target name="store-stringpool-xa11-loadtest" depends="store-stringpool-xa11-jar">
+    <antcall target="component-loadtest">
+      <param name="classpath.id" value="store-stringpool-xa11-classpath"/>
+      <param name="dir" value="${store-stringpool-xa11.src.dir}"/>
+      <param name="jar" value="${store-stringpool-xa11.jar}"/>
+    </antcall>
+  </target>
+
+  <target name="store-stringpool-xa11-javadoc"
+          depends="store-stringpool-xa11-jar"
+          description="Creates the javadoc for this store-stringpool-xa11">
+
+    <antcall target="javadoc">
+
+      <param name="javadoc.package" value="org.mulgara.store.stringpool.xa.*"/>
+      <param name="javadoc.classpath" value="store-stringpool-xa11-classpath"/>
+      <param name="javadoc.sourcepath" value="${store-stringpool-xa11.src.dir}/java"/>
+    </antcall>
+  </target>
+
+  <target name="store-stringpool-xa11-help"
+          description="Displays the help information for this store-stringpool-xa11">
+
+    <!-- This is similar to the main project's help except the information
+         is specific to this store-stringpool-xa11 -->
+
+    <echo message="Welcome to the build script for ${store-stringpool-xa11.name}."/>
+    <echo message=""/>
+    <echo message="These targets can be invoked as follows:"/>
+    <echo message="  ./build.sh &lt;target&gt;"/>
+    <echo message=""/>
+
+    <java fork="false" classname="org.apache.tools.ant.Main"
+          newenvironment="false">
+
+      <jvmarg value="${arch.bits}"/>
+
+      <arg line="-buildfile ${store-stringpool-xa11.src.dir}/build.xml -projecthelp"/>
+    </java>
+  </target>
+</project>

Added: trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/BlankNodeAllocator.java
===================================================================
--- trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/BlankNodeAllocator.java	                        (rev 0)
+++ trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/BlankNodeAllocator.java	2008-08-23 00:43:11 UTC (rev 1152)
@@ -0,0 +1,136 @@
+/*
+ * The contents of this file are subject to the Open Software License
+ * Version 3.0 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.opensource.org/licenses/osl-3.0.txt
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ */
+
+package org.mulgara.store.stringpool.xa11;
+
+import java.nio.ByteBuffer;
+
+/**
+ * This class manages the allocation and detection of blank nodes.
+ *
+ * @created Aug 15, 2008
+ * @author Paul Gearon
+ * @copyright &copy; 2008 <a href="http://www.topazproject.org/">The Topaz Project</a>
+ * @licence <a href="{@docRoot}/../../LICENCE.txt">Open Software License v3.0</a>
+ */
+public class BlankNodeAllocator {
+
+  /** The bit that indicates a blank node. */
+  static final long BLANK_NODE_BIT = 0x8000000000000000L;
+
+  /** The first valid blank node value. */
+  static final long FIRST = 1;
+  
+  /** The next node to be allocated. Initialized to 1, but usually set by the metaroot file. */
+  private long nextNode = FIRST;
+
+  /** The last committed nextNode value. */
+  private long committedNextNode = FIRST;
+
+  /**
+   * The constructor for a new blank node allocator.
+   */
+  public BlankNodeAllocator() {
+  }
+
+
+  /**
+   * Creates a new blank node allocator, initialized from a data buffer.
+   * @param data The data to initialize from.
+   * @param offset The offset into the buffer to read the state from.
+   */
+  public BlankNodeAllocator(ByteBuffer data, int offset) {
+    committedNextNode = data.getLong(offset);
+    nextNode = committedNextNode;
+  }
+
+
+  /**
+   * Writes the last committed state to a data buffer at a given offset.
+   * @param data The buffer to write to.
+   * @param offset The offset in the buffer to write to.
+   */
+  public void writeTo(ByteBuffer data, int offset) {
+    data.putLong(offset, committedNextNode);
+  }
+
+
+  /**
+   * Get the next blank node from this allocator.
+   * @return A GNode for a new blank node.
+   */
+  public synchronized long allocate() {
+    return nextNode++ | BLANK_NODE_BIT;
+  }
+
+
+  /**
+   * Test if a GNode is a blank node.
+   * @param gNode The gNode to test.
+   * @return <code>true</code> if the gNode is for a blank node.
+   */
+  public static boolean isBlank(long gNode) {
+    return (gNode & BLANK_NODE_BIT) != 0;
+  }
+
+
+  /**
+   * Clear all values back to their initialized states.
+   */
+  public void clear() {
+    nextNode = FIRST;
+    committedNextNode = FIRST;
+  }
+
+
+  /**
+   * Get the current internal state.
+   * @return The next node value. This encodes all of the internal state.
+   */
+  public long getCurrentState() {
+    return nextNode;
+  }
+
+
+  /**
+   * Set the internal state. This is just the blank node counter.
+   * @param state The state for this object.
+   */
+  public void setCurrentState(long state) {
+    this.nextNode = state;
+    committedNextNode = state;
+  }
+
+
+  /**
+   * Prepares this object for committing.
+   * @param metaroot The object that will hold state data on disk.
+   */
+  public void prepare(XA11StringPoolImpl.Metaroot metaroot) {
+    metaroot.setNextBlankNode(nextNode);
+  }
+
+
+  /**
+   * Commits the prepared changes.
+   */
+  public void commit() {
+    committedNextNode = nextNode;
+  }
+
+
+  /**
+   * Go back to the last committed position.
+   */
+  public void rollback() {
+    nextNode = committedNextNode;
+  }
+
+}
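
A short usage illustration (not part of the commit) of the allocator above; the variable name is hypothetical. Blank nodes are distinguished from data gNodes purely by the top bit:

    BlankNodeAllocator blankNodes = new BlankNodeAllocator();
    long b = blankNodes.allocate();             // FIRST | BLANK_NODE_BIT = 0x8000000000000001L
    assert BlankNodeAllocator.isBlank(b);       // true: the top bit is set
    assert !BlankNodeAllocator.isBlank(42L);    // data gNodes never have the top bit set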

Added: trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataAVLComparator.java
===================================================================
--- trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataAVLComparator.java	                        (rev 0)
+++ trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataAVLComparator.java	2008-08-23 00:43:11 UTC (rev 1152)
@@ -0,0 +1,106 @@
+/*
+ * The contents of this file are subject to the Open Software License
+ * Version 3.0 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.opensource.org/licenses/osl-3.0.txt
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ */
+
+package org.mulgara.store.stringpool.xa11;
+
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+
+import org.mulgara.store.stringpool.SPComparator;
+import org.mulgara.store.stringpool.SPObject;
+import org.mulgara.store.xa.AVLComparator;
+import org.mulgara.store.xa.AVLNode;
+
+/**
+ * Comparator for objects in the data pool.
+ *
+ * @created Aug 12, 2008
+ * @author Paul Gearon
+ * @copyright &copy; 2008 <a href="http://www.topazproject.org/">The Topaz Project</a>
+ * @licence <a href="{@docRoot}/../../LICENCE.txt">Open Software License v3.0</a>
+ */
+public class DataAVLComparator implements AVLComparator {
+
+  private final SPComparator spComparator;
+  private final int typeCategoryId;
+  private final int typeId;
+  private final ByteBuffer data;
+  private final RandomAccessFile readOnlyFlatFile;
+
+  DataAVLComparator(SPComparator spComparator, SPObject.TypeCategory typeCategory, int typeId, ByteBuffer data, RandomAccessFile flatFile) {
+    this.spComparator = spComparator;
+    this.typeCategoryId = typeCategory.ID;
+    this.typeId = typeId;
+    this.data = data;
+    this.readOnlyFlatFile = flatFile;
+  }
+
+  DataAVLComparator(SPComparator spComparator, DataStruct dataStruct, RandomAccessFile readOnlyFlatFile) {
+    this.spComparator = spComparator;
+    this.typeCategoryId = dataStruct.getTypeCategoryId();
+    this.typeId = dataStruct.getTypeId();
+    this.data = dataStruct.getData();
+    this.readOnlyFlatFile = readOnlyFlatFile;
+  }
+
+  /**
+   * @see org.mulgara.store.xa.AVLComparator#compare(long[], org.mulgara.store.xa.AVLNode)
+   */
+  public int compare(long[] key, AVLNode avlNode) {
+    // NOTE: ignore key.
+
+    // First, order by type category ID.
+    int nodeTypeCategoryId = DataStruct.getTypeCategoryId(avlNode);
+    int c = typeCategoryId - nodeTypeCategoryId;
+    if (c != 0) return c;
+
+    // Second, order by type node.
+    int nodeTypeId = DataStruct.getTypeId(avlNode);
+    if (typeId != nodeTypeId) return typeId < nodeTypeId ? -1 : 1;
+
+    // Finally, defer to the SPComparator.
+    int dataSize = DataStruct.getDataSize(avlNode);
+
+    // Retrieve the binary representation as a ByteBuffer.
+    ByteBuffer nodeData = DataStruct.getDataPrefix(avlNode, dataSize);
+
+    if (dataSize > DataStruct.MAX_DATA_SIZE) {
+      // Save the limit of data so it can be restored later in case it is
+      // made smaller by the comparePrefix method of the spComparator.
+      int savedDataLimit = data.limit();
+
+      data.rewind();
+      nodeData.rewind();
+      c = spComparator.comparePrefix(data, nodeData, dataSize);
+      if (c != 0) return c;
+
+      data.limit(savedDataLimit);
+
+      try {
+        // Retrieve the remaining bytes if any.
+        // Set the limit before the position in case the limit was made
+        // smaller by the comparePrefix method.
+        nodeData.limit(dataSize);
+        nodeData.position(DataStruct.MAX_DATA_SIZE);
+        DataStruct.getRemainingBytes(nodeData, readOnlyFlatFile, DataStruct.getGNode(avlNode));
+      } catch (IOException ex) {
+        throw new Error("I/O Error while retrieving SPObject data", ex);
+      }
+    }
+
+    data.rewind();
+    nodeData.rewind();
+    return spComparator.compare(data, nodeData);
+  }
+
+}

Added: trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataCategoryAVLComparator.java
===================================================================
--- trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataCategoryAVLComparator.java	                        (rev 0)
+++ trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataCategoryAVLComparator.java	2008-08-23 00:43:11 UTC (rev 1152)
@@ -0,0 +1,42 @@
+/*
+ * The contents of this file are subject to the Open Software License
+ * Version 3.0 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.opensource.org/licenses/osl-3.0.txt
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ */
+
+package org.mulgara.store.stringpool.xa11;
+
+import org.mulgara.store.xa.AVLComparator;
+import org.mulgara.store.xa.AVLNode;
+
+/**
+ * Comparator for category information only.
+ *
+ * @created Aug 14, 2008
+ * @author Paul Gearon
+ * @copyright &copy; 2008 <a href="http://www.topazproject.org/">The Topaz Project</a>
+ * @licence <a href="{@docRoot}/../../LICENCE.txt">Open Software License v3.0</a>
+ */
+public class DataCategoryAVLComparator implements AVLComparator {
+
+  private final int typeCategoryId;
+
+  DataCategoryAVLComparator(int typeCategoryId) {
+    this.typeCategoryId = typeCategoryId;
+  }
+
+  public int compare(long[] key, AVLNode avlNode) {
+    // NOTE: ignore key.
+
+    // First, order by type category ID.
+    int nodeTypeCategoryId = DataStruct.getTypeCategoryId(avlNode);
+    return typeCategoryId <= nodeTypeCategoryId ? -1 : 1;
+  }
+
+}

Added: trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataCategoryTypeAVLComparator.java
===================================================================
--- trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataCategoryTypeAVLComparator.java	                        (rev 0)
+++ trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataCategoryTypeAVLComparator.java	2008-08-23 00:43:11 UTC (rev 1152)
@@ -0,0 +1,52 @@
+/*
+ * The contents of this file are subject to the Open Software License
+ * Version 3.0 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.opensource.org/licenses/osl-3.0.txt
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ */
+
+package org.mulgara.store.stringpool.xa11;
+
+import org.mulgara.store.stringpool.SPObject;
+import org.mulgara.store.xa.AVLComparator;
+import org.mulgara.store.xa.AVLNode;
+
+/**
+ * Compares data objects by type and category only.
+ *
+ * @created Aug 14, 2008
+ * @author Paul Gearon
+ * @copyright &copy; 2008 <a href="http://www.topazproject.org/">The Topaz Project</a>
+ * @licence <a href="{@docRoot}/../../LICENCE.txt">Open Software License v3.0</a>
+ */
+public class DataCategoryTypeAVLComparator implements AVLComparator {
+
+
+  private final int typeCategoryId;
+  private final int typeId;
+
+  DataCategoryTypeAVLComparator(int typeCategoryId, int typeId) {
+    this.typeCategoryId = typeCategoryId;
+    this.typeId = typeId;
+    assert typeCategoryId == SPObject.TypeCategory.TCID_TYPED_LITERAL;
+  }
+
+  public int compare(long[] key, AVLNode avlNode) {
+    // NOTE: ignore key.
+
+    // First, order by type category ID.
+    int nodeTypeCategoryId = DataStruct.getTypeCategoryId(avlNode);
+    int c = typeCategoryId - nodeTypeCategoryId;
+    if (c != 0) return c;
+
+    // Second, order by type node.
+    int nodeTypeId = DataStruct.getTypeId(avlNode);
+    return typeId <= nodeTypeId ? -1 : 1;
+  }
+
+}

Added: trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataStruct.java
===================================================================
--- trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataStruct.java	                        (rev 0)
+++ trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/DataStruct.java	2008-08-23 00:43:11 UTC (rev 1152)
@@ -0,0 +1,369 @@
+/*
+ * The contents of this file are subject to the Open Software License
+ * Version 3.0 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.opensource.org/licenses/osl-3.0.txt
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ */
+
+package org.mulgara.store.stringpool.xa11;
+
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+
+import org.mulgara.store.nodepool.NodePool;
+import org.mulgara.store.stringpool.SPObject;
+import org.mulgara.store.stringpool.SPObjectFactory;
+import org.mulgara.store.stringpool.SPTypedLiteral;
+import org.mulgara.store.xa.AVLNode;
+import org.mulgara.util.Constants;
+
+/**
+ * Similar to a C-struct for storing and retrieving the data being stored by this string pool.
+ *
+ * @created Aug 12, 2008
+ * @author Paul Gearon
+ * @copyright &copy; 2008 <a href="http://www.topazproject.org/">The Topaz Project</a>
+ * @licence <a href="{@docRoot}/../../LICENCE.txt">Open Software License v3.0</a>
+ */
+class DataStruct {
+
+  static final int OFFSET_MASK = 0xFFFFFFF8;
+  static final long PADDING_MASK = 0x0000000000000007L;
+  static final int PADDING_MASK_I = 0x00000007;
+  
+  static final int MAX_PADDING = Constants.SIZEOF_LONG - 1;
+  static final byte[] PADDING = new byte[MAX_PADDING];
+
+  static final int IDX_TYPE_CATEGORY_B = 1;  // BYTE offset
+  static final int IDX_TYPE_ID_B = 2;        // BYTE offset
+  static final int IDX_SUBTYPE_ID_B = 3;     // BYTE offset
+  static final int IDX_DATA_SIZE_B = 4;      // BYTE offset
+  static final int IDX_DATA_SIZE_I = 1;      // INT offset
+  static final int IDX_DATA_B = 8;           // BYTE offset
+  static final int IDX_DATA = 1;             // LONG offset
+  static final int IDX_GRAPH_NODE = 9;       // LONG offset. Only used in AVL nodes
+
+  /** The payload size of the AVLNode in longs. */
+  static final int PAYLOAD_SIZE = IDX_GRAPH_NODE + 1;
+
+  /** The maximum number of bytes of SPObject data that will fit in the AVL node. */
+  static final int MAX_DATA_SIZE = (IDX_GRAPH_NODE - IDX_DATA) * Constants.SIZEOF_LONG;
+
+  /** The size of the header that precedes the data buffer. */
+  static final int HEADER = IDX_DATA_B;
+
+  /** The type category. */
+  private byte typeCategoryId;
+
+  /** The type ID. */
+  private byte typeId;
+
+  /** The subtype ID. */
+  private byte subtypeId;
+
+  /** The size of the data buffer. */
+  private int dataSize;
+
+  /** The raw data for the object. */
+  private ByteBuffer data;
+
+  /** Indicates that the data buffer is incomplete, and only holds the prefix. */
+  private boolean prefixOnly = false;
+
+  /** The gNode for this data. */
+  private long gNode;
+
+  /**
+   * Creates a new data structure from an object.
+   * @param spObject The object to represent.
+   * @param gNode The gNode for the object.
+   */
+  public DataStruct(SPObject spObject, long gNode) {
+    assert !BlankNodeAllocator.isBlank(gNode);
+    typeCategoryId = (byte)spObject.getTypeCategory().ID;
+    // The type fields in an SPObject all fit into a byte
+    if (spObject.getTypeCategory() == SPObject.TypeCategory.TYPED_LITERAL) {
+      SPTypedLiteral sptl = (SPTypedLiteral)spObject;
+      typeId = (byte)sptl.getTypeId();
+      subtypeId = (byte)sptl.getSubtypeId();
+    } else {
+      typeId = SPObjectFactory.INVALID_TYPE_ID;
+      subtypeId = 0;
+    }
+    data = spObject.getData();
+    dataSize = data.limit();
+    prefixOnly = false;
+    this.gNode = gNode;
+  }
+
+
+  /**
+   * Creates a new data structure from an object, without knowing the gNode.
+   * @param spObject The object to represent.
+   */
+  public DataStruct(SPObject spObject) {
+    this(spObject, NodePool.NONE);
+  }
+
+
+  /**
+   * Reads a data structure from a file at a given offset.
+   * @param file The file to read the structure from.
+   * @param gNode The gNode of the data to read.
+   */
+  public DataStruct(RandomAccessFile file, long gNode) throws IOException {
+    long offset = toOffset(gNode);
+    assert (offset & PADDING_MASK) == 0 : "Bad gNode value: " + gNode;
+    synchronized (file) {
+      file.seek(offset);
+      ByteBuffer header = ByteBuffer.allocate(HEADER);
+      file.read(header.array(), 0, HEADER);
+      if (0 != header.get(0)) throw new IllegalStateException("Bad data found in Data Pool");
+      typeCategoryId = header.get(IDX_TYPE_CATEGORY_B);
+      typeId = header.get(IDX_TYPE_ID_B);
+      subtypeId = header.get(IDX_SUBTYPE_ID_B);
+      dataSize = header.getInt(IDX_DATA_SIZE_B);
+      data = ByteBuffer.allocate(dataSize);
+      file.readFully(data.array());
+    }
+    prefixOnly = false;
+    this.gNode = gNode;
+  }
+
+
+  /**
+   * Reads a data structure from an AVL Node.
+   * @param node The AVL node to read from.
+   */
+  public DataStruct(AVLNode node) {
+    typeCategoryId = getTypeCategoryId(node);
+    typeId = getTypeId(node);
+    subtypeId = getSubtypeId(node);
+
+    // get the data buffer, or its prefix if the buffer is too large
+    dataSize = getDataSize(node);
+    data = getDataPrefix(node, dataSize);
+    prefixOnly = dataSize > MAX_DATA_SIZE;
+
+    gNode = getGNode(node);
+  }
+
+
+  /**
+   * Sets the gNode for an object that does not have the gNode set yet.
+   * @param gNode The new gNode value.
+   * @throws IllegalStateException If the object already has a gNode value.
+   */
+  public void setGNode(long gNode) {
+    if (this.gNode != NodePool.NONE) throw new IllegalStateException("Not allowed to update a GNode on an existing object.");
+    this.gNode = gNode;
+  }
+
+
+  /**
+   * Return an instance of the object represented by this structure.
+   * @return A new instance of an SPObject for this structure,
+   *         or <code>null</code> if this is a blank node.
+   */
+  public SPObject getSPObject() {
+    if (prefixOnly) throw new IllegalStateException("Only have the prefix for this object");
+    if (typeCategoryId == SPObject.TypeCategory.TCID_FREE) return null;
+    SPObject.TypeCategory typeCategory = SPObject.TypeCategory.forId(typeCategoryId);
+    return XA11StringPoolImpl.SPO_FACTORY.newSPObject(typeCategory, typeId, subtypeId, data);
+  }
+
+
+  /**
+   * Write this object to a file channel at the channel's current position.
+   * The gNode for the object is not updated here.
+   * @param fc The file channel to write to.
+   * @return The number of bytes written.
+   * @throws IOException Caused by errors writing to the file.
+   */
+  public int writeTo(FileChannel fc) throws IOException {
+    if (prefixOnly) throw new IllegalStateException("Only have the prefix for this object");
+    assert gNode == NodePool.NONE || toOffset(gNode) == fc.position() : "Unexpected gNode value: " + gNode + ". Offset = " + fc.position();
+    // don't update the gNode
+
+    // work out the size
+    int unpaddedSize = HEADER + dataSize;
+    int roundedSize = round(unpaddedSize);
+    ByteBuffer buffer = ByteBuffer.allocate(roundedSize);
+
+    // put the data in
+    buffer.put(IDX_TYPE_CATEGORY_B, typeCategoryId);
+    buffer.put(IDX_TYPE_ID_B, typeId);
+    buffer.put(IDX_SUBTYPE_ID_B, subtypeId);
+    buffer.putInt(IDX_DATA_SIZE_B, dataSize);
+    data.rewind();
+    assert dataSize == data.limit();
+    buffer.position(IDX_DATA_B);
+    buffer.put(data);
+
+    // send it to the file
+    buffer.rewind();
+    fc.write(buffer);
+
+    return roundedSize;
+  }
+
+
+  /**
+   * Writes this object to the payload in an AVLNode.
+   * @param node The node to write to.
+   */
+  public void writeTo(AVLNode node) {
+    node.putPayloadByte(IDX_TYPE_CATEGORY_B, typeCategoryId);
+    node.putPayloadByte(IDX_TYPE_ID_B, typeId);
+    node.putPayloadByte(IDX_SUBTYPE_ID_B, subtypeId);
+    node.putPayloadInt(IDX_DATA_SIZE_I, dataSize);
+    // store the head of the buffer
+    data.rewind();
+    // if prefix only then already limited to MAX_DATA_SIZE
+    if (!prefixOnly) data.limit(Math.min(dataSize, MAX_DATA_SIZE));
+    node.getBlock().put((AVLNode.HEADER_SIZE + IDX_DATA) * Constants.SIZEOF_LONG, data);
+    // reset the buffer limit if we reduced the limit earlier
+    if (!prefixOnly) data.limit(dataSize);
+    // The graph node at the end is the difference between AVL nodes and the flat file
+    node.putPayloadLong(IDX_GRAPH_NODE, gNode);
+  }
+
+
+  /**
+   * Gets the remaining data of an object into the buffer.
+   * @param file The file to read the data from.
+   */
+  public void getRemainingBytes(RandomAccessFile file) throws IOException {
+    // only need to get more if we only have the prefix
+    if (!prefixOnly) return;
+    // move the limit out to the end
+    data.limit(dataSize);
+    // read the file starting at the data, plus the header, plus the already read portion
+    file.seek(toOffset(gNode) + HEADER + MAX_DATA_SIZE);
+    // read into the buffer, filling at the point where the data had been truncated.
+    int remainingBytes = dataSize - MAX_DATA_SIZE;
+    assert remainingBytes > 0;
+    // we expect read to return everything from a file, so don't use readFully
+    int dataRead = file.read(data.array(), MAX_DATA_SIZE, remainingBytes);
+    if (dataRead != remainingBytes) throw new IOException("Unable to retrieve data from file.");
+  }
+
+
+  /** @return the typeCategoryId */
+  public byte getTypeCategoryId() { return typeCategoryId; }
+
+
+  /** @return the typeId */
+  public byte getTypeId() { return typeId; }
+
+
+  /** @return the subtypeId */
+  public byte getSubtypeId() { return subtypeId; }
+
+
+  /** @return the dataSize */
+  public int getDataSize() { return dataSize; }
+
+
+  /** @return the data */
+  public ByteBuffer getData() { return data; }
+
+
+  /** @return the prefixOnly */
+  public boolean isPrefixOnly() { return prefixOnly; }
+
+
+  /** @return the gNode */
+  public long getGNode() { return gNode; }
+
+
+  /** @return The structural aspects of this data, but not the buffer contents. */
+  public String toString() {
+    StringBuilder sb = new StringBuilder("gNode:");
+    sb.append(gNode);
+    sb.append(", typeCategoryId:").append(typeCategoryId);
+    sb.append(", typeId:").append(typeId);
+    sb.append(", subtypeId:").append(subtypeId);
+    sb.append(", dataSize:").append(dataSize);
+    return sb.toString();
+  }
+
+  /** @return The type category from an AVL node. */
+  static byte getTypeCategoryId(AVLNode node) { return (byte)node.getPayloadByte(IDX_TYPE_CATEGORY_B); }
+
+  /** @return The type ID from an AVL node. */
+  static byte getTypeId(AVLNode node) { return (byte)node.getPayloadByte(IDX_TYPE_ID_B); }
+
+  /** @return The sub type ID from an AVL node. */
+  static byte getSubtypeId(AVLNode node) { return (byte)node.getPayloadByte(IDX_SUBTYPE_ID_B); }
+
+  /** @return The data size from an AVL node. */
+  static int getDataSize(AVLNode node) { return node.getPayloadInt(IDX_DATA_SIZE_I); }
+
+  /** @return The data prefix from an AVL node. */
+  static ByteBuffer getDataPrefix(AVLNode node, int size) {
+    ByteBuffer dataPrefix = ByteBuffer.allocate(size);
+    if (size > MAX_DATA_SIZE) dataPrefix.limit(MAX_DATA_SIZE);
+    node.getBlock().get((AVLNode.HEADER_SIZE + IDX_DATA) * Constants.SIZEOF_LONG, dataPrefix);
+    dataPrefix.rewind();
+    return dataPrefix;
+  }
+
+  /** @return The gNode from an AVL node. */
+  public static long getGNode(AVLNode node) { return node.getPayloadLong(IDX_GRAPH_NODE); }
+
+
+  /**
+   * Gets the remaining data of an object into the buffer.
+   * @param data The buffer to fill, positioned at the end of the prefix data.
+   * @param file The file to read the data from.
+   * @param gNode The gNode identifying where the object is stored in the file.
+   */
+  public static void getRemainingBytes(ByteBuffer data, RandomAccessFile file, long gNode) throws IOException {
+    // read the file starting at the data, plus the header, plus the already read portion
+    file.seek(toOffset(gNode) + HEADER + MAX_DATA_SIZE);
+    // read into the buffer, filling at the point where the data had been truncated.
+    int remainingBytes = data.limit() - data.position();
+    assert remainingBytes > 0;
+    int dataRead = file.read(data.array(), MAX_DATA_SIZE, remainingBytes);
+    if (dataRead != remainingBytes) throw new IOException("Unable to retrieve data from file.");
+  }
+
+
+  /**
+   * Converts a gNode to an offset into a file.
+   * @param gNode The gNode to convert
+   * @return The file offset associated with the gNode
+   */
+  public static final long toOffset(long gNode) {
+    long offset = gNode - NodePool.MIN_NODE;
+    if (offset % Constants.SIZEOF_LONG != 0) throw new IllegalArgumentException("Invalid gNode: " + gNode);
+    return offset;
+  }
+
+
+  /**
+   * Converts a file offset to the gNode stored at that location.
+   * @param offset The file offset to convert.
+   * @return The gNode associated with the file offset.
+   */
+  public static final long toGNode(long offset) {
+    return offset + NodePool.MIN_NODE;
+  }
+
+
+  /**
+   * Rounds this value up to the nearest long boundary.
+   * @param offset The offset to round.
+   * @return The closest offset >= the argument that is on a long boundary.
+   */
+  public static final int round(int offset) {
+    return (offset + MAX_PADDING) & OFFSET_MASK;
+  }
+
+}
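
A worked example (not part of the commit) of the record layout and padding defined above, using a hypothetical 13-byte value:

    int unpadded = DataStruct.HEADER + 13;      // 8-byte header + 13 bytes of data = 21
    int padded   = DataStruct.round(unpadded);  // (21 + 7) & 0xFFFFFFF8 = 24
    // The next record in the flat file, and therefore the next gNode, starts 24 bytes later.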

Added: trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolFactory.java
===================================================================
--- trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolFactory.java	                        (rev 0)
+++ trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolFactory.java	2008-08-23 00:43:11 UTC (rev 1152)
@@ -0,0 +1,92 @@
+/*
+ * The contents of this file are subject to the Open Software License
+ * Version 3.0 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.opensource.org/licenses/osl-3.0.txt
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ */
+
+
+package org.mulgara.store.stringpool.xa11;
+
+// Java 2 standard packages
+import java.io.File;
+import java.io.IOException;
+
+// Third-party packages
+import org.apache.log4j.*;
+
+// Local packages
+import org.mulgara.resolver.spi.FactoryInitializer;
+import org.mulgara.resolver.spi.InitializerException;
+import org.mulgara.store.stringpool.StringPool;
+import org.mulgara.store.stringpool.StringPoolException;
+import org.mulgara.store.stringpool.StringPoolFactory;
+
+/**
+ * A {@link StringPoolFactory} that constructs {@link XA11StringPoolImpl} instances.
+ *
+ * @created Aug 11, 2008
+ * @author Paul Gearon
+ * @copyright &copy; 2008 <a href="http://www.topazproject.org/">The Topaz Project</a>
+ * @licence <a href="{@docRoot}/../../LICENCE.txt">Open Software License v3.0</a>
+ */
+public class XA11StringPoolFactory implements StringPoolFactory {
+
+  /** Logger. */
+  private static final Logger logger = Logger.getLogger(XA11StringPoolFactory.class.getName());
+
+  /** The singleton instance of this class. */
+  private static StringPoolFactory stringPoolFactory = null;
+
+  /** The base file names. */
+  private final String[] baseNames;
+
+  /**
+   * This constructor is only for internal use.  Use the {@link #newInstance} method
+   * to obtain instances.
+   */
+  private XA11StringPoolFactory(String[] baseNames) {
+    this.baseNames = baseNames;
+  }
+
+  /**
+   * Obtain the singleton instance of this factory.
+   *
+   * @param factoryInitializer Provides the directories in which the string pool files are created.
+   */
+  static public StringPoolFactory newInstance(FactoryInitializer factoryInitializer) throws InitializerException {
+
+    if (stringPoolFactory == null) {
+      // Lazily initialize the singleton instance
+      File[] directories = factoryInitializer.getDirectories();
+      String[] paths = new String[directories.length];
+      for (int f = 0; f < directories.length; f++) paths[f] = directories[f].toString() + File.separatorChar + "xa11";
+      stringPoolFactory = new XA11StringPoolFactory(paths);
+    }
+
+    return stringPoolFactory;
+  }
+
+  //
+  // Methods implementing StringPoolFactory
+  //
+
+  /**
+   * {@inheritDoc StringPoolFactory}
+   */
+  public StringPool newStringPool() throws StringPoolException {
+    try {
+      XA11StringPoolImpl xaStringPoolImpl = new XA11StringPoolImpl(baseNames);
+      return xaStringPoolImpl;
+    } catch (IOException e) {
+      logger.error("Couldn't construct string pool", e);
+      throw new StringPoolException("Couldn't construct string pool", e);
+    }
+  }
+}
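
An illustrative usage sketch (not part of the commit); the initializer is assumed to be supplied by the resolver framework, and checked exceptions are omitted for brevity:

    StringPoolFactory factory = XA11StringPoolFactory.newInstance(factoryInitializer);
    StringPool stringPool = factory.newStringPool();   // backed by an XA11StringPoolImpl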

Added: trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolImpl.java
===================================================================
--- trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolImpl.java	                        (rev 0)
+++ trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolImpl.java	2008-08-23 00:43:11 UTC (rev 1152)
@@ -0,0 +1,2081 @@
+/*
+ * The contents of this file are subject to the Open Software License
+ * Version 3.0 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.opensource.org/licenses/osl-3.0.txt
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ */
+
+package org.mulgara.store.stringpool.xa11;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.net.URI;
+import java.nio.ByteOrder;
+import java.nio.channels.FileChannel;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.log4j.Logger;
+import org.mulgara.query.Constraint;
+import org.mulgara.query.Cursor;
+import org.mulgara.query.TuplesException;
+import org.mulgara.query.Variable;
+import org.mulgara.store.StoreException;
+import org.mulgara.store.nodepool.NewNodeListener;
+import org.mulgara.store.nodepool.NodePool;
+import org.mulgara.store.nodepool.NodePoolException;
+import org.mulgara.store.statement.StatementStore;
+import org.mulgara.store.stringpool.SPComparator;
+import org.mulgara.store.stringpool.SPObject;
+import org.mulgara.store.stringpool.SPObjectFactory;
+import org.mulgara.store.stringpool.SPTypedLiteral;
+import org.mulgara.store.stringpool.StringPoolException;
+import org.mulgara.store.stringpool.SPObject.TypeCategory;
+import org.mulgara.store.stringpool.xa.SPObjectFactoryImpl;
+import org.mulgara.store.tuples.Annotation;
+import org.mulgara.store.tuples.RowComparator;
+import org.mulgara.store.tuples.SimpleTuplesFormat;
+import org.mulgara.store.tuples.Tuples;
+import org.mulgara.store.xa.AVLComparator;
+import org.mulgara.store.xa.AVLFile;
+import org.mulgara.store.xa.AVLNode;
+import org.mulgara.store.xa.AbstractBlockFile;
+import org.mulgara.store.xa.Block;
+import org.mulgara.store.xa.BlockFile;
+import org.mulgara.store.xa.LockFile;
+import org.mulgara.store.xa.SimpleXAResourceException;
+import org.mulgara.store.xa.XANodePool;
+import org.mulgara.store.xa.XAStringPool;
+import org.mulgara.store.xa.XAUtils;
+import org.mulgara.util.Constants;
+import org.mulgara.util.Pair;
+
+import static org.mulgara.store.stringpool.xa11.DataStruct.*;
+
+/**
+ * This is a write-once-read-many (WORM) transactional string pool. The only write operations
+ * that are permitted are insertions; deletions are ignored. The one exception is that rolling
+ * back a transaction abandons the writes made in that transaction.
+ *
+ * @created Aug 11, 2008
+ * @author Paul Gearon
+ * @copyright &copy; 2008 <a href="http://www.topazproject.org/">The Topaz Project</a>
+ * @licence <a href="{@docRoot}/../../LICENCE.txt">Open Software License v3.0</a>
+ */
+public class XA11StringPoolImpl implements XAStringPool, XANodePool {
+
+  /** Logger. */
+  private static final Logger logger = Logger.getLogger(XA11StringPoolImpl.class);
+
+  /** The number of metaroots in the metaroot file. */
+  private static final int NR_METAROOTS = 2;
+
+  /** The variables to use when pretending that the string pool is a Tuples. */
+  static final Variable[] VARIABLES = new Variable[] { StatementStore.VARIABLES[0] };
+
+  /** A factory for this class. */
+  static final SPObjectFactory SPO_FACTORY = SPObjectFactoryImpl.getInstance();
+
+  /** The main data structures are rooted on this filename. */
+  private String mainFilename;
+
+  /** The flat data structures are rooted on this filename. */
+  private String flatDataFilename;
+
+  /** The index file for mapping data to a gNode. */
+  private AVLFile dataToGNode;
+
+  /** The flat file for mapping gNodes to data. */
+  private RandomAccessFile gNodeToDataReadOnly;
+
+  /** The object for creating the output appender. */
+  private FileOutputStream gNodeToDataOutputStream;
+
+  /** A writing object for the flat file for mapping gNodes to data. */
+  private FileChannel gNodeToDataAppender;
+
+  /** Indicates that the current phase has been written to. */
+  private boolean dirty = true;
+
+  /** The next-gNode value. This corresponds to the end of the flat file. */
+  private long nextGNodeValue;
+
+  /** The object for handling blank node allocation. */
+  private BlankNodeAllocator blankNodeAllocator = new BlankNodeAllocator();
+
+  /** The latest phase in the index tree. */
+  private TreePhase currentPhase = null;
+
+  /** The next-gNode for the committed phase. */
+  private long committedNextGNode;
+
+  /** The LockFile that protects the string pool from being opened twice. */
+  private LockFile lockFile;
+
+  /** The BlockFile for the node pool metaroot file. */
+  private BlockFile metarootFile = null;
+
+  /** The metaroot info in the metaroot file. */
+  private Metaroot[] metaroots = new Metaroot[NR_METAROOTS];
+
+  /** A Token on the last committed phase. */
+  private TreePhase.Token committedPhaseToken = null;
+
+  /** Object used for locking on synchronized access to the committed phase. */
+  private Object committedPhaseLock = new Object();
+
+  /** Phase reference for when the phase is being written. */
+  private TreePhase.Token recordingPhaseToken = null;
+
+  /** Indicates that the phase is written but not yet acknowledged as valid. */
+  private boolean prepared = false;
+
+  /** The valid phase index on file to use.  Must always be 0 or 1. */
+  private int phaseIndex = 0;
+
+  /** The number of the current phase.  These increase monotonically. */
+  private int phaseNumber = 0;
+
+  /** A list of listeners to inform whenever a new node is created. */
+  private List<NewNodeListener> newNodeListeners = new LinkedList<NewNodeListener>();
+
+  /** A flag used to delay throwing an exception on the file version until it is needed. */
+  private boolean wrongFileVersion = false;
+
+  /**
+   * Create a string pool instance using a set of directories.
+   * @param basenames A list of paths for creating string pool files in.
+   *        Each path is expected to be on a separate file system.
+   * @throws IOException The files cannot be created or read.
+   */
+  public XA11StringPoolImpl(String[] basenames) throws IOException {
+    distributeFilenames(basenames);
+
+    lockFile = LockFile.createLockFile(mainFilename + ".sp.lock");
+
+    wrongFileVersion = false;
+    try {
+      try {
+        wrongFileVersion = !Metaroot.check(mainFilename + ".sp");
+      } catch (FileNotFoundException ex) {
+        // no-op
+      }
+
+      dataToGNode = new AVLFile(mainFilename + ".sp_avl", PAYLOAD_SIZE);
+      gNodeToDataOutputStream = new FileOutputStream(flatDataFilename, true);
+      gNodeToDataAppender = gNodeToDataOutputStream.getChannel();
+      gNodeToDataReadOnly = new RandomAccessFile(flatDataFilename, "r");
+
+    } catch (IOException ex) {
+      try {
+        close();
+      } catch (StoreException ex2) {
+        // no-op
+      }
+      throw ex;
+    }
+  }
+
+
+  /**
+   * Returns the phase numbers of the valid committed phases found in the metaroot file.
+   * @see org.mulgara.store.xa.SimpleXARecoveryHandler#recover()
+   */
+  public int[] recover() throws SimpleXAResourceException {
+    if (currentPhase != null) return new int[0];
+    if (wrongFileVersion) throw new SimpleXAResourceException("Wrong metaroot file version.");
+
+    try {
+      openMetarootFile(false);
+    } catch (IOException ex) {
+      throw new SimpleXAResourceException("I/O error", ex);
+    }
+
+    // Count the number of valid phases.
+    int phaseCount = 0;
+    if (metaroots[0].getValid() != 0) ++phaseCount;
+    if (metaroots[1].getValid() != 0) ++phaseCount;
+
+    // Read the phase numbers.
+    int[] phaseNumbers = new int[phaseCount];
+    int index = 0;
+    if (metaroots[0].getValid() != 0) phaseNumbers[index++] = metaroots[0].getPhaseNr();
+    if (metaroots[1].getValid() != 0) phaseNumbers[index++] = metaroots[1].getPhaseNr();
+    return phaseNumbers;
+  }
+
+
+  /**
+   * @see org.mulgara.store.stringpool.StringPool#put(long, org.mulgara.store.stringpool.SPObject)
+   */
+  public void put(long node, SPObject spObject) throws StringPoolException {
+    throw new UnsupportedOperationException("Cannot manually allocate a gNode for this string pool.");
+  }
+
+
+  /**
+   * Stores an spObject and allocates a gNode to go with it.
+   * @param spObject The object to store.
+   * @return The new gNode associated with this object.
+   * @throws StringPoolException If the string pool could not allocate space.
+   */
+  public synchronized long put(SPObject spObject) throws StringPoolException {
+    try {
+      long gNode = nextGNodeValue;
+      DataStruct spObjectData = new DataStruct(spObject, nextGNodeValue);
+      // this is the key step: gNode allocation moves up by the size of the data just written
+      nextGNodeValue += spObjectData.writeTo(gNodeToDataAppender);
+      mapObjectToGNode(spObjectData, spObject.getSPComparator());
+      informNodeListeners(gNode);
+      return gNode;
+    } catch (IOException e) {
+      throw new StringPoolException("Unable to write to data files.", e);
+    }
+  }
+
+
+  /**
+   * Sets the node pool for this string pool. The node pool is integrated into this
+   * implementation, so the only acceptable value is this string pool itself.
+   * @param nodePool The node pool being set. Must be this string pool.
+   */
+  public void setNodePool(XANodePool nodePool) {
+    if (nodePool != this) throw new IllegalArgumentException("XA 1.1 data pool requires an integrated node pool.");
+    if (logger.isDebugEnabled()) logger.debug("Setting a node pool for the XA 1.1 string pool. Ignored.");
+  }
+
+
+  /**
+   * @see org.mulgara.store.stringpool.StringPool#findGNode(org.mulgara.store.stringpool.SPObject)
+   */
+  public long findGNode(SPObject spObject) throws StringPoolException {
+    checkInitialized();
+    return currentPhase.findGNode(spObject, false);
+  }
+
+  /**
+   * @deprecated
+   * @see org.mulgara.store.stringpool.StringPool#findGNode(org.mulgara.store.stringpool.SPObject, org.mulgara.store.nodepool.NodePool)
+   */
+  public long findGNode(SPObject spObject, NodePool nodePool) throws StringPoolException {
+    throw new UnsupportedOperationException("Cannot manually set the node pool for an XA 1.1 store.");
+  }
+
+  /**
+   * Finds the gNode matching an SPObject, optionally allocating a new gNode when no match exists.
+   * @param spObject The object to search on.
+   * @param create If <code>true</code>, allocate a new gNode when the object is not already in the pool.
+   */
+  public long findGNode(SPObject spObject, boolean create) throws StringPoolException {
+    checkInitialized();
+    return currentPhase.findGNode(spObject, create);
+  }
+
+  /**
+   * @see org.mulgara.store.stringpool.StringPool#findGNodes(org.mulgara.store.stringpool.SPObject, boolean, org.mulgara.store.stringpool.SPObject, boolean)
+   */
+  public Tuples findGNodes(SPObject lowValue, boolean inclLowValue,
+                           SPObject highValue, boolean inclHighValue) throws StringPoolException {
+    checkInitialized();
+    dirty = false;
+    return currentPhase.findGNodes(lowValue, inclLowValue, highValue, inclHighValue);
+  }
+
+  /**
+   * @see org.mulgara.store.stringpool.StringPool#findGNodes(org.mulgara.store.stringpool.SPObject.TypeCategory, java.net.URI)
+   */
+  public Tuples findGNodes(TypeCategory typeCategory, URI typeURI) throws StringPoolException {
+    checkInitialized();
+    dirty = false;
+    return currentPhase.findGNodes(typeCategory, typeURI);
+  }
+
+  /**
+   * @see org.mulgara.store.stringpool.StringPool#findSPObject(long)
+   */
+  public SPObject findSPObject(long node) throws StringPoolException {
+    // blank nodes don't get loaded up as an SPObject
+    if (BlankNodeAllocator.isBlank(node)) return null;
+    // outside of the allocated range
+    if (node >= nextGNodeValue) return null;
+    try {
+      return new DataStruct(gNodeToDataReadOnly, node).getSPObject();
+    } catch (IOException ioe) {
+      throw new StringPoolException("Unable to load data from data pool.", ioe);
+    }
+  }
+
+  /**
+   * @see org.mulgara.store.stringpool.StringPool#getSPObjectFactory()
+   */
+  public SPObjectFactory getSPObjectFactory() {
+    return SPO_FACTORY;
+  }
+
+  /**
+   * Nodes are never removed from this string pool.
+   * @see org.mulgara.store.stringpool.StringPool#remove(long)
+   * @return Always <code>true</code>, so that callers expecting the node to have been
+   *         removed get the answer they were expecting.
+   */
+  public boolean remove(long node) throws StringPoolException {
+    return true;
+  }
+
+
+  /**
+   * @see org.mulgara.store.xa.XAStringPool#close()
+   */
+  public void close() throws StoreException {
+    try {
+      close(false);
+    } catch (IOException ex) {
+      throw new StringPoolException("I/O error closing string pool.", ex);
+    }
+  }
+
+
+  /**
+   * @see org.mulgara.store.xa.XAStringPool#delete()
+   */
+  public void delete() throws StoreException {
+    try {
+      close(true);
+    } catch (IOException ex) {
+      throw new StringPoolException("I/O error deleting string pool.", ex);
+    } finally {
+      gNodeToDataReadOnly = null;
+      gNodeToDataAppender = null;
+      dataToGNode = null;
+      metarootFile = null;
+    }
+  }
+
+
+  /**
+   * @see org.mulgara.store.xa.XAStringPool#newReadOnlyStringPool()
+   */
+  public XAStringPool newReadOnlyStringPool() {
+    return new ReadOnlyStringPool();
+  }
+
+
+  /**
+   * @see org.mulgara.store.xa.XAStringPool#newWritableStringPool()
+   */
+  public XAStringPool newWritableStringPool() {
+    return this;
+  }
+
+
+  /**
+   * @see org.mulgara.store.xa.SimpleXAResource#commit()
+   */
+  public void commit() throws SimpleXAResourceException {
+    synchronized (this) {
+      if (!prepared) throw new SimpleXAResourceException("commit() called without previous prepare().");
+  
+      // Perform a commit.
+      try {
+        // New phase is now marked valid. Invalidate the metaroot of the old phase.
+        Metaroot mr = metaroots[1 - phaseIndex];
+        mr.setValid(0);
+        mr.write();
+        metarootFile.force();
+  
+        // Release the token for the previously committed phase.
+        synchronized (committedPhaseLock) {
+          if (committedPhaseToken != null) committedPhaseToken.release();
+          committedPhaseToken = recordingPhaseToken;
+        }
+        recordingPhaseToken = null;
+        blankNodeAllocator.commit();
+      } catch (IOException ex) {
+        logger.fatal("I/O error while performing commit.", ex);
+        throw new SimpleXAResourceException("I/O error while performing commit.", ex);
+      } finally {
+        prepared = false;
+        if (recordingPhaseToken != null) {
+          // Something went wrong!
+          recordingPhaseToken.release();
+          recordingPhaseToken = null;
+  
+          logger.error("Commit failed.  Calling close().");
+          try {
+            close();
+          } catch (Throwable t) {
+            logger.error("Exception on forced close()", t);
+          }
+        }
+      }
+    }
+  }
+
+
+  /**
+   * @see org.mulgara.store.xa.SimpleXAResource#getPhaseNumber()
+   */
+  public synchronized int getPhaseNumber() throws SimpleXAResourceException {
+    checkInitialized();
+    return phaseNumber;
+  }
+
+
+  /**
+   * Writes all transactional data to disk, in preparation for a full commit.
+   * @throws SimpleXAResourceException Occurs due to an IO error when writing data to disk.
+   */
+  public void prepare() throws SimpleXAResourceException {
+    // TODO: This synchronization is possibly redundant due to the global lock in StringPoolSession
+    synchronized(this) {
+      checkInitialized();
+  
+      if (prepared) {
+        // prepare already performed.
+        throw new SimpleXAResourceException("prepare() called twice.");
+      }
+  
+      try {
+        // Perform a prepare.
+        recordingPhaseToken = currentPhase.new Token();
+        TreePhase recordingPhase = currentPhase;
+        currentPhase = new TreePhase();
+  
+        // Ensure that all data associated with the phase is on disk.
+        dataToGNode.force();
+        gNodeToDataAppender.force(true);
+  
+        // Write the metaroot.
+        int newPhaseIndex = 1 - phaseIndex;
+        int newPhaseNumber = phaseNumber + 1;
+  
+        Metaroot metaroot = metaroots[newPhaseIndex];
+        metaroot.setValid(0);
+        metaroot.setPhaseNr(newPhaseNumber);
+        metaroot.setFlatFileSize(DataStruct.toOffset(nextGNodeValue));
+        blankNodeAllocator.prepare(metaroot);
+        if (logger.isDebugEnabled()) logger.debug("Writing data pool metaroot for phase: " + newPhaseNumber);
+        metaroot.addPhase(recordingPhase);
+        metaroot.write();
+        metarootFile.force();
+        metaroot.setValid(1);
+        metaroot.write();
+        metarootFile.force();
+  
+        phaseIndex = newPhaseIndex;
+        phaseNumber = newPhaseNumber;
+        committedNextGNode = nextGNodeValue;
+        prepared = true;
+      } catch (IOException ex) {
+        logger.error("I/O error while performing prepare.", ex);
+        throw new SimpleXAResourceException("I/O error while performing prepare.", ex);
+      } finally {
+        if (!prepared) {
+          logger.error("Prepare failed.");
+          if (recordingPhaseToken != null) {
+            recordingPhaseToken.release();
+            recordingPhaseToken = null;
+          }
+        }
+      }
+    }
+  }
+
+
+  /**
+   * Drops all data in the current transaction, recovering any used resources.
+   * @throws SimpleXAResourceException Caused by any IO errors.
+   */
+  public void rollback() throws SimpleXAResourceException {
+    // TODO: This synchronization is probably redundant due to the global lock in StringPoolSession
+    synchronized (this) {
+      checkInitialized();
+      try {
+        if (prepared) {
+          // Restore phaseIndex and phaseNumber to their previous values.
+          phaseIndex = 1 - phaseIndex;
+          --phaseNumber;
+          recordingPhaseToken = null;
+          prepared = false;
+  
+          // Invalidate the metaroot of the other phase.
+          Metaroot mr = metaroots[1 - phaseIndex];
+          mr.setValid(0);
+          mr.write();
+          metarootFile.force();
+        }
+      } catch (IOException ex) {
+        throw new SimpleXAResourceException("I/O error while performing rollback (invalidating metaroot)", ex);
+      } finally {
+        try {
+          try {
+            blankNodeAllocator.rollback();
+            nextGNodeValue = committedNextGNode;
+            long offset = DataStruct.toOffset(nextGNodeValue);
+            gNodeToDataAppender.truncate(offset);
+            gNodeToDataAppender.position(offset);
+          } catch (IOException ioe) {
+            throw new SimpleXAResourceException("I/O error while performing rollback (new committed phase)", ioe);
+          }
+        } finally {
+          try {
+            currentPhase = new TreePhase(committedPhaseToken.getPhase());
+          } catch (IOException ex) {
+            throw new SimpleXAResourceException("I/O error while performing rollback (new committed phase)", ex);
+          }
+        }
+      }
+    }
+  }
+
+
+  public void refresh() throws SimpleXAResourceException {
+    /* no-op */
+  }
+
+  public void release() throws SimpleXAResourceException {
+    /* no-op */
+  }
+
+
+  /**
+   * @see org.mulgara.store.xa.SimpleXARecoveryHandler#clear()
+   */
+  public synchronized void clear() throws IOException, SimpleXAResourceException {
+    if (currentPhase == null) clear(0);
+  }
+
+
+  /**
+   * @see org.mulgara.store.xa.SimpleXARecoveryHandler#clear(int)
+   */
+  public void clear(int phaseNumber) throws IOException, SimpleXAResourceException {
+    if (currentPhase != null) throw new IllegalStateException("StringPool already has a current phase.");
+
+    openMetarootFile(true);
+
+    synchronized (committedPhaseLock) {
+      committedPhaseToken = new TreePhase().new Token();
+    }
+    this.phaseNumber = phaseNumber;
+    phaseIndex = 1;
+    dataToGNode.clear();
+    blankNodeAllocator.clear();
+
+    // clear the flat file
+    nextGNodeValue = NodePool.MIN_NODE;
+    committedNextGNode = NodePool.MIN_NODE;
+    // this forces a seek to 0
+    gNodeToDataAppender.truncate(0);
+
+    currentPhase = new TreePhase();
+  }
+
+
+  /**
+   * This gets called after {@link #recover()}.
+   * It selects the active phase to use, and sets all the internal data related to a phase.
+   * @see org.mulgara.store.xa.SimpleXARecoveryHandler#selectPhase(int)
+   */
+  public void selectPhase(int phaseNumber) throws IOException, SimpleXAResourceException {
+    if (currentPhase != null) throw new SimpleXAResourceException("selectPhase() called on initialized StringPoolImpl.");
+    if (metarootFile == null) throw new SimpleXAResourceException("String pool metaroot file is not open.");
+
+    // Locate the metaroot corresponding to the given phase number.
+    if (metaroots[0].getValid() != 0 && metaroots[0].getPhaseNr() == phaseNumber) {
+      phaseIndex = 0;
+      // A new phase will be saved in the other metaroot.
+    } else if (metaroots[1].getValid() != 0 && metaroots[1].getPhaseNr() == phaseNumber) {
+      phaseIndex = 1;
+      // A new phase will be saved in the other metaroot.
+    } else {
+      throw new SimpleXAResourceException("Phase number [" + phaseNumber + "] is not present in the metaroot file. Found [" + metaroots[0].getPhaseNr() + "], [" + metaroots[1].getPhaseNr() + "]");
+    }
+
+    Metaroot metaroot = metaroots[phaseIndex];
+
+    // Load a duplicate of the selected phase.  The duplicate will have a
+    // phase number which is one higher than the original phase.
+    try {
+      synchronized (committedPhaseLock) {
+        committedPhaseToken = new TreePhase(metaroot.block).new Token();
+      }
+      this.phaseNumber = phaseNumber;
+    } catch (IllegalStateException ex) {
+      throw new SimpleXAResourceException("Cannot construct initial phase.", ex);
+    }
+    // load all the remaining state for this phase
+    blankNodeAllocator.setCurrentState(metaroot.getNextBlankNode());
+    long fileSize = metaroot.getFlatFileSize();
+    committedNextGNode = DataStruct.toGNode(fileSize);
+    nextGNodeValue = committedNextGNode;
+    updateAppender(fileSize);
+    currentPhase = new TreePhase();
+
+    // Invalidate the on-disk metaroot that the new phase will be saved to.
+    Metaroot mr = metaroots[1 - phaseIndex];
+    mr.setValid(0);
+    mr.write();
+    metarootFile.force();
+  }
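+
+  /*
+   * Recovery sketch (hypothetical caller; assumes recover(), declared by
+   * SimpleXARecoveryHandler and not shown here, reports the valid phase numbers
+   * found in the two metaroots):
+   *
+   *   int[] phases = pool.recover();
+   *   if (phases.length == 0) pool.clear();   // no valid phase: initialize an empty pool
+   *   else pool.selectPhase(phases[0]);       // adopt one of the recovered phases
+   */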
+
+
+  public void newNode(long node) throws Exception {
+    /* no-op: This was already allocated by this object */
+  }
+
+  public void releaseNode(long node) {
+    /* no-op */
+  }
+
+
+  /**
+   * @see org.mulgara.store.xa.XANodePool#addNewNodeListener(org.mulgara.store.nodepool.NewNodeListener)
+   */
+  public void addNewNodeListener(NewNodeListener l) {
+    if (l != this) newNodeListeners.add(l);
+  }
+
+
+  /**
+   * @see org.mulgara.store.xa.XANodePool#newReadOnlyNodePool()
+   */
+  public XANodePool newReadOnlyNodePool() {
+    return this;
+  }
+
+
+  /**
+   * @see org.mulgara.store.xa.XANodePool#newWritableNodePool()
+   */
+  public XANodePool newWritableNodePool() {
+    return this;
+  }
+
+
+  /**
+   * @see org.mulgara.store.xa.XANodePool#removeNewNodeListener(org.mulgara.store.nodepool.NewNodeListener)
+   */
+  public void removeNewNodeListener(NewNodeListener l) {
+    newNodeListeners.remove(l);
+  }
+
+
+  /**
+   * Allocate a new blank node. This interface was defined for allocating all nodes
+   * but standard node allocation is now handled internally within this data pool
+   * rather than calling back into this method.
+   * @see org.mulgara.store.nodepool.NodePool#newNode()
+   */
+  public long newNode() throws NodePoolException {
+    long node = blankNodeAllocator.allocate();
+    return informNodeListeners(node);
+  }
+
+
+  /**
+   * Inform all listeners that a new node was just allocated.
+   * @param newNode The newly allocated node.
+   * @return The node that was passed to all the listeners.
+   */
+  private long informNodeListeners(long newNode) {
+    for (NewNodeListener l: newNodeListeners) {
+      try {
+        l.newNode(newNode);
+      } catch (Exception e) {
+        logger.error("Error informing object [" + l.getClass() + ":" + l + "] of a new node", e);
+      }
+    }
+    return newNode;
+  }
+
+
+  /**
+   * Inserts an object into an index, so it can be looked up to find a gNode.
+   * @param spObjectData The data for the object used as a key to the index.
+   * @param comparator The SPComparator used for comparing data of the provided type.
+   */
+  private void mapObjectToGNode(DataStruct spObjectData, SPComparator comparator) throws StringPoolException, IOException {
+    checkInitialized();
+    if (!dirty && currentPhase.isInUse()) {
+      currentPhase = new TreePhase();
+      dirty = true;
+    }
+
+    if (logger.isDebugEnabled()) logger.debug("put(" + spObjectData.getGNode() + ", " + spObjectData + ")");
+
+    try {
+      currentPhase.put(spObjectData, comparator);
+    } catch (RuntimeException ex) {
+      if (logger.isDebugEnabled()) logger.debug("RuntimeException in put()", ex);
+      throw ex;
+    } catch (Error e) {
+      if (logger.isDebugEnabled()) logger.debug("Error in put()", e);
+      throw e;
+    } catch (StringPoolException ex) {
+      if (logger.isDebugEnabled()) logger.debug("StringPoolException in put()", ex);
+      throw ex;
+    }
+  }
+
+
+  /**
+   * Checks that the phase for the tree index has been set.
+   * @throws IllegalStateException If the currentPhase is not initialized.
+   */
+  private void checkInitialized() {
+    if (currentPhase == null) {
+      throw new IllegalStateException("No current phase. Object Pool has not been initialized or has been closed.");
+    }
+  }
+
+
+  /**
+   * Remove all mappings of files, so we can close them, and possibly delete them.
+   */
+  public synchronized void unmap() {
+    if (committedPhaseToken != null) {
+      recordingPhaseToken = null;
+      prepared = false;
+
+      try {
+        new TreePhase(committedPhaseToken.getPhase());
+      } catch (Throwable t) {
+        logger.warn("Exception while rolling back in unmap()", t);
+      }
+      currentPhase = null;
+
+      synchronized (committedPhaseLock) {
+        committedPhaseToken.release();
+        committedPhaseToken = null;
+      }
+    }
+
+    if (dataToGNode != null) dataToGNode.unmap();
+
+    if (metarootFile != null) {
+      if (metaroots[0] != null) metaroots[0] = null;
+      if (metaroots[1] != null) metaroots[1] = null;
+      metarootFile.unmap();
+    }
+  }
+
+
+  /**
+   * Closes all the files involved with a data pool
+   * @param deleteFiles Remove files after closing them.
+   * @throws IOException There was an error accessing the filesystem.
+   */
+  private void close(boolean deleteFiles) throws IOException {
+    try {
+      unmap();
+    } finally {
+      try {
+        if (gNodeToDataReadOnly != null) gNodeToDataReadOnly.close();
+      } finally {
+        try {
+          if (gNodeToDataAppender != null) gNodeToDataAppender.close();
+        } finally {
+          try {
+            if (deleteFiles) new File(flatDataFilename).delete();
+          } finally {
+            try {
+              if (dataToGNode != null) {
+                if (deleteFiles) dataToGNode.delete();
+                else dataToGNode.close();
+              }
+            } finally {
+              try {
+                if (metarootFile != null) {
+                  if (deleteFiles) metarootFile.delete();
+                  else metarootFile.close();
+                }
+              } finally {
+                if (lockFile != null) {
+                  lockFile.release();
+                  lockFile = null;
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+
+  /**
+   * Reads the data from the metaroot file, and initializes the files.
+   *
+   * @param clear If <code>true</code> then empties all files.
+   * @throws IOException An error reading or writing files.
+   * @throws SimpleXAResourceException An internal error, often caused by an IOException
+   */
+  private void openMetarootFile(boolean clear) throws IOException, SimpleXAResourceException {
+    if (metarootFile == null) {
+      metarootFile = AbstractBlockFile.openBlockFile(
+          mainFilename + ".sp", Metaroot.getSize() * Constants.SIZEOF_LONG, BlockFile.IOType.EXPLICIT
+      );
+
+      // check that the file is the right size
+      long nrBlocks = metarootFile.getNrBlocks();
+      if (nrBlocks != NR_METAROOTS) {
+        if (nrBlocks > 0) {
+          logger.info("String pool metaroot file for triple store \"" + mainFilename +
+                      "\" has invalid number of blocks: " + nrBlocks);
+          // rewrite the file
+          if (nrBlocks < NR_METAROOTS) {
+            clear = true;
+            metarootFile.clear();
+          }
+        } else {
+          // Empty file, so initialize it
+          clear = true;
+        }
+        // expand or contract the file as necessary
+        metarootFile.setNrBlocks(NR_METAROOTS);
+      }
+
+      metaroots[0] = new Metaroot(this, metarootFile.readBlock(0));
+      metaroots[1] = new Metaroot(this, metarootFile.readBlock(1));
+    }
+
+    if (clear) {
+      // Invalidate the metaroots on disk.
+      metaroots[0].clear().write();
+      metaroots[1].clear().write();
+      metarootFile.force();
+    }
+  }
+
+
+  /**
+   * Update the data appender. This moves to the end of the file if necessary, and
+   * truncates the file to this endpoint if it is too long (due to an abandoned transaction).
+   * @param fileSize The end position of the file
+   * @throws IOException Error moving in the file or changing its length.
+   */
+  private void updateAppender(long fileSize) throws IOException {
+    // truncate if the file is longer than the appending position
+    if (gNodeToDataAppender.size() > fileSize) gNodeToDataAppender.truncate(fileSize);
+    gNodeToDataAppender.position(fileSize);
+  }
+
+
+  /**
+   * Makes the best available use of the provided paths to reduce seek contention
+   * where possible. Paths are taken from the head of the list, with each
+   * additional path in the list used to hold a different file.
+   * Multiple paths are presumed to be on different filesystems.
+   * @param basenames An array of paths available for storing this string pool.
+   */
+  private void distributeFilenames(String[] basenames) {
+    if (basenames == null || basenames.length == 0) {
+      throw new IllegalArgumentException("At least one directory must be provided for storing the string pool");
+    }
+    mainFilename = basenames[0];
+    flatDataFilename = mainFilename;
+
+    if (basenames.length > 1) {
+      flatDataFilename = basenames[1];
+    }
+
+    flatDataFilename += ".sp_nd";
+  }
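+
+  /*
+   * Example with hypothetical paths: given basenames {"/disk1/store/xa11", "/disk2/store/xa11"},
+   * the metaroot and index files are based on "/disk1/store/xa11" (e.g. the metaroot file
+   * "/disk1/store/xa11.sp", see openMetarootFile) while the flat data file becomes
+   * "/disk2/store/xa11.sp_nd". With a single basename, both share that path.
+   */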
+
+
+  /**
+   * This is a struct for holding metaroot information.
+   */
+  final static class Metaroot {
+
+    /** Unique int value in metaroot to mark this file as a string pool. */
+    static final int FILE_MAGIC = 0xa5f3f4f6;
+
+    /** The current int version of this file format, stored in the metaroot. */
+    static final int FILE_VERSION = 1;
+
+    /** Index of the file magic number (integer) within each of the two on-disk metaroots. */
+    static final int IDX_MAGIC = 0;
+
+    /** Index of the file version number (integer) within each of the two on-disk metaroots. */
+    static final int IDX_VERSION = 1;
+
+    /** Index of the valid flag (integer) within each of the two on-disk metaroots. */
+    static final int IDX_VALID = 2;
+
+    /** The index of the phase number (integer) in the on-disk phase. */
+    static final int IDX_PHASE_NUMBER = 3;
+
+    /** The index (in ints) of the long holding the committed flat file size. */
+    static final int IDX_FLAT_FILE_SIZE = 4;
+
+    /** The index (in longs) of the long holding the committed flat file size. */
+    static final int IDX_L_FLAT_FILE_SIZE = 2;
+
+    /** The index (in ints) of the long holding the committed next blank node. */
+    static final int IDX_NEXT_BLANK = 6;
+
+    /** The index (in longs) of the long holding the committed next blank node. */
+    static final int IDX_L_NEXT_BLANK = 3;
+
+    /** The size of the header of a metaroot in longs. */
+    static final int HEADER_SIZE_LONGS = IDX_L_NEXT_BLANK + 1;
+
+    /** The size of the header of a metaroot in ints. */
+    static final int HEADER_SIZE_INTS = HEADER_SIZE_LONGS * 2;
+
+    /** The size of a metaroot in longs. This is the metaroot header, plus the rest of the data given to the metaroot. */
+    static final int METAROOT_SIZE_LONGS = HEADER_SIZE_LONGS + TreePhase.RECORD_SIZE;
+
+    /** The size of a metaroot in ints. */
+    static final int METAROOT_SIZE_INTS = METAROOT_SIZE_LONGS * 2;
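+
+    /*
+     * Header layout implied by the indices above (int slots on the left, long slots on the right):
+     *   ints 0-1: magic, version       -> long 0
+     *   ints 2-3: valid, phase number  -> long 1
+     *   ints 4-5: flat file size       -> long 2 (IDX_L_FLAT_FILE_SIZE)
+     *   ints 6-7: next blank node      -> long 3 (IDX_L_NEXT_BLANK)
+     * followed by TreePhase.RECORD_SIZE longs of phase data.
+     */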
+
+    /** The VALID flag for a metaroot. */
+    int valid;
+
+    /** The phase number for a metaroot. */
+    int phaseNr;
+
+    /** The size of the flat file described by this metaroot. */
+    long flatFileSize;
+
+    /** The metaroot description of the next blank node to be allocated. */
+    long nextBlankNode;
+
+    /** The block this data structure sits on top of. */
+    final Block block;
+
+    /**
+     * Creates a new metaroot around a block.
+     * @param currentPool The string pool that owns this metaroot.
+     * @param block The block to build the structure around.
+     */
+    public Metaroot(XA11StringPoolImpl currentPool, Block block) throws IOException {
+      this.block = block;
+      read(currentPool);
+    }
+
+    /** Gets the total size of this block, in LONG values. */
+    public static int getSize() { return METAROOT_SIZE_LONGS; }
+
+    /** Gets the size of the header portion of this block, in LONG values. */
+    public static int getHeaderSize() { return HEADER_SIZE_LONGS; }
+
+    /**
+     * Clears out the block holding this metaroot's information.
+     * @return This metaroot, after resetting its block to an empty, invalid state.
+     */
+    public Metaroot clear() {
+      block.putInt(IDX_MAGIC, FILE_MAGIC);
+      block.putInt(IDX_VERSION, FILE_VERSION);
+      block.putInt(IDX_VALID, 0);
+      block.putInt(IDX_PHASE_NUMBER, 0);
+      block.putLong(IDX_L_FLAT_FILE_SIZE, 0);
+      block.putLong(IDX_L_NEXT_BLANK, BlankNodeAllocator.FIRST);
+      int[] empty = new int[METAROOT_SIZE_INTS - HEADER_SIZE_INTS];
+      block.put(HEADER_SIZE_INTS, empty);
+      valid = 0;
+      phaseNr = 0;
+      flatFileSize = 0;
+      return this;
+    }
+
+    /**
+     * Writes this metaroot's information to its block, excluding phase information.
+     */
+    public Metaroot writeAllToBlock() {
+      block.putInt(IDX_MAGIC, FILE_MAGIC);
+      block.putInt(IDX_VERSION, FILE_VERSION);
+      block.putInt(IDX_VALID, valid);
+      block.putInt(IDX_PHASE_NUMBER, phaseNr);
+      block.putLong(IDX_L_FLAT_FILE_SIZE, flatFileSize);
+      block.putLong(IDX_L_NEXT_BLANK, nextBlankNode);
+      // phase information is not written
+      return this;
+    }
+
+    /**
+     * Writes the metaroot block out.
+     * @return The current metaroot.
+     */
+    public Metaroot write() throws IOException {
+      block.write();
+      return this;
+    }
+
+    /**
+     * Reads metaroot information out of a block and into this structure.
+     * @param currentPool The string pool that owns this metaroot.
+     */
+    public Metaroot read(XA11StringPoolImpl currentPool) throws IOException {
+      valid = block.getInt(IDX_VALID);
+      phaseNr = block.getInt(IDX_PHASE_NUMBER);
+      flatFileSize = block.getLong(IDX_L_FLAT_FILE_SIZE);
+      nextBlankNode = block.getLong(IDX_L_NEXT_BLANK);
+      return this;
+    }
+
+    /**
+     * Tests if this metaroot contains appropriate metaroot information.
+     * @return <code>true</code> for a block with an appropriate header, <code>false</code> otherwise.
+     */
+    public boolean check() {
+      return FILE_MAGIC == block.getInt(IDX_MAGIC) && FILE_VERSION == block.getInt(IDX_VERSION);
+    }
+
+    /**
+     * Tests if a block contains appropriate metaroot information.
+     * @param block The block to test.
+     * @return <code>true</code> for a block with an appropriate header, <code>false</code> otherwise.
+     */
+    public static boolean check(Block block) {
+      return FILE_MAGIC == block.getInt(IDX_MAGIC) && FILE_VERSION == block.getInt(IDX_VERSION);
+    }
+
+    /**
+     * Tests if a raw file starts with appropriate metaroot information.
+     * @param filename The name of the file to test.
+     * @return <code>true</code> for a file with an appropriate header, <code>false</code> otherwise.
+     */
+    public static boolean check(String filename) throws IOException {
+      RandomAccessFile file = new RandomAccessFile(filename, "r");
+      try {
+        if (file.length() < 2 * Constants.SIZEOF_INT) return false;
+        int fileMagic = file.readInt();
+        int fileVersion = file.readInt();
+        if (AbstractBlockFile.byteOrder != ByteOrder.BIG_ENDIAN) {
+          fileMagic = XAUtils.bswap(fileMagic);
+          fileVersion = XAUtils.bswap(fileVersion);
+        }
+        if (FILE_MAGIC != fileMagic || FILE_VERSION != fileVersion) return false;
+      } finally {
+        file.close();
+      }
+      return true;
+    }
+
+    public int getVersion() { return FILE_VERSION; }
+    public int getMagicNumber() { return FILE_MAGIC; }
+    public int getValid() { return valid; }
+    public int getPhaseNr() { return phaseNr; }
+    public long getFlatFileSize() { return flatFileSize; }
+    public long getNextBlankNode() { return nextBlankNode; }
+
+    public void setValid(int valid) { this.valid = valid; block.putInt(IDX_VALID, valid); }
+    public void setPhaseNr(int phaseNr) { this.phaseNr = phaseNr; block.putInt(IDX_PHASE_NUMBER, phaseNr); }
+    public void setFlatFileSize(long flatFileSize) { this.flatFileSize = flatFileSize; block.putLong(IDX_L_FLAT_FILE_SIZE, flatFileSize); }
+    public void setNextBlankNode(long nextBlankNode) { this.nextBlankNode = nextBlankNode; block.putLong(IDX_L_NEXT_BLANK, nextBlankNode); }
+    public void addPhase(TreePhase phase) { phase.avlFilePhase.writeToBlock(block, HEADER_SIZE_LONGS); }
+
+  }
+
+  /**
+   * An internal read-only view of the current string pool.
+   */
+  final class ReadOnlyStringPool implements XAStringPool {
+
+    /** The phase this string pool is associated with. */
+    TreePhase phase;
+
+    /** Releases resources held by the string pool. Not used. */
+    public void close() throws StringPoolException {
+      throw new UnsupportedOperationException("Trying to close a read-only string pool.");
+    }
+
+    /** Deletes files used by the string pool. Not used. */
+    public void delete() throws StringPoolException {
+      throw new UnsupportedOperationException("Trying to delete a read-only string pool.");
+    }
+
+    public XAStringPool newReadOnlyStringPool() {
+      throw new UnsupportedOperationException("Read-only string pools are not used to manage other string pools.");
+    }
+
+    public XAStringPool newWritableStringPool() {
+      throw new UnsupportedOperationException("Read-only string pools are not used to manage other string pools.");
+    }
+
+    public int[] recover() throws SimpleXAResourceException {
+      throw new UnsupportedOperationException("Attempting to recover ReadOnlyStringPool");
+    }
+
+    public void selectPhase(int phaseNumber) throws IOException, SimpleXAResourceException {
+      throw new UnsupportedOperationException("Attempting to selectPhase of ReadOnlyStringPool");
+    }
+
+    public void newNode(long node) throws Exception {
+      throw new UnsupportedOperationException("Cannot write to a read-only string pool.");
+    }
+
+    public void releaseNode(long node) throws Exception {
+      throw new UnsupportedOperationException("Cannot write to a read-only string pool.");
+    }
+
+    public void put(long node, SPObject spObject) throws StringPoolException {
+      throw new UnsupportedOperationException("Cannot write to a read-only string pool.");
+    }
+
+    public boolean remove(long node) throws StringPoolException {
+      throw new UnsupportedOperationException("Cannot write to a read-only string pool.");
+    }
+    
+    public void commit() throws SimpleXAResourceException { }
+    public void prepare() throws SimpleXAResourceException { }
+    public void rollback() throws SimpleXAResourceException { }
+    public void clear() throws IOException, SimpleXAResourceException { }
+    public void clear(int phaseNumber) throws IOException, SimpleXAResourceException { }
+
+    public void refresh() throws SimpleXAResourceException { /* no-op */ }
+
+    public void release() throws SimpleXAResourceException { }
+
+    public int getPhaseNumber() throws SimpleXAResourceException {
+      return phaseNumber;
+    }
+
+    public long findGNode(SPObject spObject) throws StringPoolException {
+      return XA11StringPoolImpl.this.findGNode(spObject);
+    }
+
+    public long findGNode(SPObject spObject, NodePool nodePool) throws StringPoolException {
+      throw new UnsupportedOperationException("Cannot manually set the node pool for an XA 1.1 store.");
+    }
+
+    public Tuples findGNodes(SPObject lowValue, boolean inclLowValue, SPObject highValue, boolean inclHighValue) throws StringPoolException {
+      return XA11StringPoolImpl.this.findGNodes(lowValue, inclLowValue, highValue, inclHighValue);
+    }
+
+    public Tuples findGNodes(TypeCategory typeCategory, URI typeURI) throws StringPoolException {
+      return XA11StringPoolImpl.this.findGNodes(typeCategory, typeURI);
+    }
+
+    public SPObject findSPObject(long node) throws StringPoolException {
+      return XA11StringPoolImpl.this.findSPObject(node);
+    }
+
+    public SPObjectFactory getSPObjectFactory() {
+      return SPO_FACTORY;
+    }
+
+    public long put(SPObject spObject) throws StringPoolException, NodePoolException {
+      throw new UnsupportedOperationException("Cannot write to a read-only string pool.");
+    }
+
+    public void setNodePool(XANodePool nodePool) {
+      // NO-OP
+    }
+
+    public long findGNode(SPObject spObject, boolean create) throws StringPoolException {
+      if (create) throw new UnsupportedOperationException("Trying to modify a read-only string pool.");
+      return phase.findGNode(spObject, false);
+    }
+
+  }
+
+
+  /**
+   * Represents the root of an index tree. This root is updated for each new phase.
+   */
+  private class TreePhase {
+
+    /** The size of a phase record, in Longs. */
+    static final int RECORD_SIZE = AVLFile.Phase.RECORD_SIZE;
+
+    /** The underlying tree to manage. */
+    private AVLFile.Phase avlFilePhase;
+
+    /**
+     * Create a new phase for the tree.
+     */
+    public TreePhase() throws IOException {
+      avlFilePhase = dataToGNode.new Phase();
+    }
+
+    /**
+     * A copy constructor for a phase.
+     * @param p The existing phase to build this one from.
+     * @throws IOException Caused by an IO error in the underlying AVL tree.
+     */
+    TreePhase(TreePhase p) throws IOException {
+      assert p != null;
+
+      avlFilePhase = dataToGNode.new Phase(p.avlFilePhase);
+      // current phase should be set to this
+      dirty = true;
+    }
+
+
+    /**
+     * A constructor from a block on disk.
+     * @param b The block to read from, at a fixed offset of Metaroot.HEADER_SIZE_LONGS.
+     * @throws IOException Caused by an IO error reading the block.
+     */
+    TreePhase(Block b) throws IOException {
+      avlFilePhase = dataToGNode.new Phase(b, Metaroot.HEADER_SIZE_LONGS);
+      // current phase should be set to this
+      dirty = true;
+    }
+
+
+    /**
+     * Indicates if there are any remaining readers on the current phase.
+     * @return <code>true</code> if the phase is in use.
+     */
+    public boolean isInUse() {
+      return avlFilePhase.isInUse();
+    }
+
+    /**
+     * Inserts a node into the tree, mapping data onto a gNode.
+     * @param objectData The data to use as the index key, carrying the gNode it maps to.
+     * @param spComparator The comparator used to order data of this type in the index.
+     */
+    public void put(DataStruct objectData, SPComparator spComparator) throws StringPoolException {
+      if (objectData.getGNode() < NodePool.MIN_NODE) throw new IllegalArgumentException("gNode < MIN_NODE. Object = " + objectData);
+
+      AVLNode[] findResult = null;
+      try {
+        AVLComparator avlComparator = new DataAVLComparator(spComparator, objectData, gNodeToDataReadOnly);
+
+        // Find the adjacent nodes.
+        findResult = avlFilePhase.find(avlComparator, null);
+        if (findResult != null && findResult.length == 1) {
+          throw new StringPoolException("SPObject already exists.  (existing graph node: " + findResult[0].getPayloadLong(IDX_GRAPH_NODE) + ")");
+        }
+
+        put(objectData, findResult);
+
+      } catch (IOException ex) {
+        throw new StringPoolException("I/O Error", ex);
+      } finally {
+        if (findResult != null) AVLFile.release(findResult);
+      }
+    }
+
+
+    /**
+     * Inserts data into the tree, allocating a new node to store the data in.
+     * @param objectData The data to store.
+     * @param findResult A pair of nodes that the new node must fit between,
+     *        or <code>null</code> if the tree is empty.
+     * @throws StringPoolException If the data is already in the tree.
+     * @throws IOException If the tree could not be written to.
+     */
+    private void put(DataStruct objectData, AVLNode[] findResult) throws StringPoolException, IOException {
+      // Create the new AVLNode.
+      AVLNode newNode = avlFilePhase.newAVLNodeInstance();
+      objectData.writeTo(newNode);
+      newNode.write();
+
+      if (findResult == null) {
+        avlFilePhase.insertFirst(newNode);
+      } else {
+        // Insert the node into the tree.
+        int li = AVLFile.leafIndex(findResult);
+        findResult[li].insert(newNode, 1 - li);
+      }
+      newNode.release();
+    }
+
+
+    /**
+     * Finds a graph node matching a given SPObject.
+     * @param spObject The SPObject to search on.
+     * @param create If <code>true</code> then new nodes are to be allocated when an SPObject
+     *        is not found.
+     * @return The graph node. <code>NodePool.NONE</code> if not found and <var>create</var>
+     *         is <code>false</code>.
+     * @throws StringPoolException For an internal search error.
+     */
+    long findGNode(SPObject spObject, boolean create) throws StringPoolException {
+      if (spObject == null) throw new StringPoolException("spObject parameter is null");
+
+      long gNode;
+      AVLNode[] findResult = null;
+      try {
+        SPComparator spComparator = spObject.getSPComparator();
+        DataStruct objectData = new DataStruct(spObject);
+        AVLComparator avlComparator = new DataAVLComparator(spComparator, objectData, gNodeToDataReadOnly);
+
+        // Find the SPObject.
+        findResult = avlFilePhase.find(avlComparator, null);
+        if (findResult != null && findResult.length == 1) {
+          gNode = findResult[0].getPayloadLong(IDX_GRAPH_NODE);
+        } else {
+          if (create) {
+            gNode = nextGNodeValue;
+            objectData.setGNode(gNode);
+            // allocated gNodes move up by the size of the data between them
+            nextGNodeValue += objectData.writeTo(gNodeToDataAppender);
+            put(objectData, findResult);
+            informNodeListeners(gNode);
+          } else {
+            // Not found.
+            gNode = NodePool.NONE;
+          }
+        }
+      } catch (IOException ex) {
+        throw new StringPoolException("I/O Error", ex);
+      } catch (RuntimeException ex) {
+        if (logger.isDebugEnabled()) logger.debug("RuntimeException in findGNode(" + spObject + ")", ex);
+        throw ex;
+      } catch (Error e) {
+        if (logger.isDebugEnabled()) logger.debug("Error in findGNode(" + spObject + ")", e);
+        throw e;
+      } finally {
+        if (findResult != null) AVLFile.release(findResult);
+      }
+
+      if (logger.isDebugEnabled()) logger.debug("findGNode(" + spObject + ") = " + gNode);
+
+      return gNode;
+    }
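+
+    /*
+     * Note on the write-once design: a newly created gNode is taken from the current end
+     * of the flat data file (nextGNodeValue), and the object's bytes are appended there,
+     * so DataStruct.toOffset(gNode) locates the data directly and no separate free list
+     * is needed for reallocation.
+     */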
+
+
+    /**
+     * Finds a range of SPObjects.
+     * @param lowValue The low end of the range.
+     * @param inclLowValue If the low end value is to be included in the results.
+     * @param highValue The high end of the range.
+     * @param inclHighValue If the high end value is to be included in the results.
+     * @return A range of values, in a Tuples.
+     * @throws StringPoolException Any kind of error, both due to internal structure and IO errors.
+     */
+    Tuples findGNodes(SPObject lowValue, boolean inclLowValue, SPObject highValue, boolean inclHighValue) throws StringPoolException {
+      SPObject.TypeCategory typeCategory;
+      int typeId;
+      AVLNode lowAVLNode;
+      long highAVLNodeId;
+
+      if (lowValue == null && highValue == null) {
+        // Return all nodes in the index.
+        typeCategory = null;
+        typeId = SPObjectFactory.INVALID_TYPE_ID;
+        lowAVLNode = avlFilePhase.getRootNode();
+        if (lowAVLNode != null) lowAVLNode = lowAVLNode.getMinNode_R();
+        highAVLNodeId = Block.INVALID_BLOCK_ID;
+      } else {
+        // Get the type category.
+        SPObject typeValue = lowValue != null ? lowValue : highValue;
+        typeCategory = typeValue.getTypeCategory();
+        typeId = typeCategory == SPObject.TypeCategory.TYPED_LITERAL ?
+                 ((SPTypedLiteral)typeValue).getTypeId() : SPObjectFactory.INVALID_TYPE_ID;
+
+        // Check that the two SPObjects are of the same type.
+        if (lowValue != null && highValue != null) {
+          if (
+            typeCategory != highValue.getTypeCategory() || (
+                typeCategory == SPObject.TypeCategory.TYPED_LITERAL &&
+                ((SPTypedLiteral)lowValue).getTypeId() != ((SPTypedLiteral)highValue).getTypeId()
+            )
+          ) {
+            // Type mismatch.
+            throw new StringPoolException("lowValue and highValue are not of the same type");
+          }
+
+          if (lowValue != null && highValue != null) {
+            // Check for lowValue being higher than highValue.
+            // Also check for lowValue being equal to highValue but excluded
+            // by either inclLowValue or inclHighValue being false.
+            int c = lowValue.compareTo(highValue);
+            if (c > 0 || c == 0 && (!inclLowValue || !inclHighValue)) {
+              return new GNodeTuplesImpl(
+                  null, SPObjectFactory.INVALID_TYPE_ID,
+                  null, null, null, Block.INVALID_BLOCK_ID
+              );
+            }
+          }
+        }
+
+        // Compute the comparator for lowValue.
+        AVLComparator lowComparator;
+        if (lowValue != null) {
+          DataStruct lowData = new DataStruct(lowValue);
+          SPComparator spComparator = lowValue.getSPComparator();
+          // lowComparator = new SPAVLComparator(spComparator, typeCategory, typeId, data);
+          lowComparator = new DataAVLComparator(spComparator, lowData, gNodeToDataReadOnly);
+        } else {
+          // Select the first node with the current type.
+          if (typeCategory == SPObject.TypeCategory.TYPED_LITERAL) {
+            lowComparator = new DataCategoryTypeAVLComparator(typeCategory.ID, typeId);
+          } else {
+            lowComparator = new DataCategoryAVLComparator(typeCategory.ID);
+          }
+        }
+
+        // Compute the comparator for highValue.
+        AVLComparator highComparator;
+        if (highValue != null) {
+          DataStruct highData = new DataStruct(highValue);
+          SPComparator spComparator = highValue.getSPComparator();
+          highComparator = new DataAVLComparator(spComparator, highData, gNodeToDataReadOnly);
+        } else {
+          // Select the first node past the last one that has the current type.
+          if (typeCategory == SPObject.TypeCategory.TYPED_LITERAL) {
+            highComparator = new DataCategoryTypeAVLComparator(typeCategory.ID, typeId + 1);
+          } else {
+            highComparator = new DataCategoryAVLComparator(typeCategory.ID + 1);
+          }
+        }
+
+        AVLNode[] findResult = avlFilePhase.find(lowComparator, null);
+        if (findResult == null) {
+          // Empty store.
+          lowAVLNode = null;
+          highAVLNodeId = Block.INVALID_BLOCK_ID;
+        } else {
+          if (findResult.length == 1) {
+            // Found the node exactly.
+            lowAVLNode = findResult[0];
+            // Handle inclLowValue.
+            if (!inclLowValue) {
+              lowAVLNode = lowAVLNode.getNextNode_R();
+
+              // The lowValue passed to the GNodeTuplesImpl constructor
+              // is always inclusive but inclLowValue is false.
+              // Recalculate lowValue.
+              if (lowAVLNode != null) lowValue = loadSPObject(typeCategory, typeId, lowAVLNode);
+            }
+          } else {
+            // Did not find the node but found the location where the node
+            // would be if it existed.
+            if (findResult[0] != null) findResult[0].release();
+            lowAVLNode = findResult[1];
+          }
+
+          if (lowAVLNode != null) {
+            // Find the high node.
+
+            findResult = avlFilePhase.find(highComparator, null);
+            if (findResult.length == 1) {
+              // Found the node exactly.
+              AVLNode highAVLNode = findResult[0];
+              // Handle inclHighValue.
+              if (inclHighValue) {
+                // Step past this node so that it is included in the range.
+                highAVLNode = highAVLNode.getNextNode();
+                if (highAVLNode != null) {
+                  highAVLNodeId = highAVLNode.getId();
+
+                  // The highValue passed to the GNodeTuplesImpl constructor
+                  // is always exclusive but inclHighValue is true.
+                  // Recalculate highValue.
+                  highValue = loadSPObject(typeCategory, typeId, highAVLNode);
+
+                  highAVLNode.release();
+                } else {
+                  highAVLNodeId = Block.INVALID_BLOCK_ID;
+                  highValue = null;
+                }
+              } else {
+                highAVLNodeId = highAVLNode.getId();
+              }
+            } else {
+              // Did not find the node but found the location where the node would be if it existed.
+              highAVLNodeId = findResult[1] != null ? findResult[1].getId() : Block.INVALID_BLOCK_ID;
+            }
+
+            AVLFile.release(findResult);
+          } else {
+            highAVLNodeId = Block.INVALID_BLOCK_ID;
+          }
+        }
+      }
+
+      return new GNodeTuplesImpl(typeCategory, typeId, lowValue, highValue, lowAVLNode, highAVLNodeId);
+    }
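+
+    /*
+     * Usage sketch (hypothetical values): a call such as findGNodes(low, true, high, false)
+     * yields a single-column Tuples of the gNodes for all stored SPObjects in the range
+     * [low, high), in index order; both bounds must share a type category (and, for typed
+     * literals, a type ID) or a StringPoolException is thrown.
+     */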
+
+
+    /**
+     * Get the entire set of GNodes that match a given type.
+     * @param typeCategory The category of type to match.
+     * @param typeURI The specific type to search for.
+     * @return A tuples containing all GNodes of the requested type.
+     * @throws StringPoolException Caused by a structural error or an IO exception.
+     */
+    Tuples findGNodes(SPObject.TypeCategory typeCategory, URI typeURI) throws StringPoolException {
+
+      // null parameters mean we want all GNodes
+      if (typeCategory == null) {
+        if (typeURI != null) throw new StringPoolException("typeCategory is null and typeURI is not null");
+        return findAllGNodes();
+      }
+
+      // Convert the type URI to a type ID.
+      int typeId;
+      try {
+        typeId = (typeURI == null) ? SPObjectFactory.INVALID_TYPE_ID : SPO_FACTORY.getTypeId(typeURI);
+      } catch (IllegalArgumentException ex) {
+        throw new StringPoolException("Unsupported XSD type: " + typeURI, ex);
+      }
+
+
+      // get the appropriate comparators for the requested type
+      Pair<AVLComparator,AVLComparator> comparators = getTypeComparators(typeCategory, typeId);
+      AVLComparator lowComparator = comparators.first();
+      AVLComparator highComparator = comparators.second();
+
+      AVLNode lowAVLNode;
+      long highAVLNodeId;
+
+      AVLNode[] findResult = avlFilePhase.find(lowComparator, null);
+      if (findResult == null) {
+        // Empty store.
+        lowAVLNode = null;
+        highAVLNodeId = Block.INVALID_BLOCK_ID;
+      } else {
+        assert findResult.length == 2;
+        lowAVLNode = findResult[1];
+        if (findResult[0] != null) findResult[0].release();
+
+        if (lowAVLNode != null) {
+          // Find the high node.
+          findResult = avlFilePhase.find(highComparator, null);
+          assert findResult.length == 2;
+          highAVLNodeId = findResult[1] != null ? findResult[1].getId() : Block.INVALID_BLOCK_ID;
+          AVLFile.release(findResult);
+        } else {
+          highAVLNodeId = Block.INVALID_BLOCK_ID;
+        }
+      }
+
+      return new GNodeTuplesImpl(typeCategory, typeId, null, null, lowAVLNode, highAVLNodeId);
+    }
+
+
+    /**
+     * Constructs a pair of comparators for finding the lowest and highest AVL nodes for a given type specification.
+     * @param typeCategory The category of nodes from the data pool.
+     * @param typeId The ID of the type, if it is a literal.
+     * @return A pair of comparators for the given type.
+     */
+    private Pair<AVLComparator,AVLComparator> getTypeComparators(SPObject.TypeCategory typeCategory, int typeId) {
+      AVLComparator lowComparator;
+      AVLComparator highComparator;
+
+      if (typeCategory == SPObject.TypeCategory.TYPED_LITERAL && typeId != SPObjectFactory.INVALID_TYPE_ID) {
+        // Return nodes of the specified category and type node.
+        lowComparator = new DataCategoryTypeAVLComparator(typeCategory.ID, typeId);
+        highComparator = new DataCategoryTypeAVLComparator(typeCategory.ID, typeId + 1);
+      } else {
+        // Return nodes of the specified category.
+        lowComparator = new DataCategoryAVLComparator(typeCategory.ID);
+        highComparator = new DataCategoryAVLComparator(typeCategory.ID + 1);
+      }
+      return new Pair<AVLComparator,AVLComparator>(lowComparator, highComparator);
+    }
+
+
+    /**
+     * Retrieves all nodes in the index.
+     * @return A Tuples for all the nodes.
+     */
+    private Tuples findAllGNodes() {
+      AVLNode lowAVLNode = avlFilePhase.getRootNode();
+      if (lowAVLNode != null) lowAVLNode = lowAVLNode.getMinNode_R();
+      return new GNodeTuplesImpl(null, SPObjectFactory.INVALID_TYPE_ID, null, null, lowAVLNode, Block.INVALID_BLOCK_ID);
+    }
+
+
+    /**
+     * Load an SPObject with some type checking.
+     * @param typeCategory The category of the object.
+     * @param typeId The ID for the object type
+     * @param avlNode The node to load the data from.
+     * @return The requested object, or <code>null</code> if the object is not compatible with the request.
+     */
+    private SPObject loadSPObject(SPObject.TypeCategory typeCategory, int typeId, AVLNode avlNode) throws StringPoolException {
+      DataStruct data = new DataStruct(avlNode);
+      try {
+        data.getRemainingBytes(gNodeToDataReadOnly);
+      } catch (IOException e) {
+        throw new StringPoolException("Unable to read data pool", e);
+      }
+
+      int typeCategoryId = data.getTypeCategoryId();
+      if (
+          typeCategoryId == SPObject.TypeCategory.TCID_FREE || // blank node
+          // type mismatch
+          typeCategoryId != typeCategory.ID || (
+              typeCategory == SPObject.TypeCategory.TYPED_LITERAL &&
+              typeId != avlNode.getPayloadByte(IDX_TYPE_ID_B)
+          )
+      ) {
+        return null;
+      }
+      return data.getSPObject();
+    }
+
+
+    /**
+     * Attaches a token to a phase. Used to maintain a reference.
+     */
+    final class Token {
+
+      private AVLFile.Phase.Token avlFileToken;
+
+      /** Constructs a token on the current phase */
+      Token() {
+        avlFileToken = avlFilePhase.use();
+      }
+
+
+      public TreePhase getPhase() {
+        assert avlFileToken != null : "Invalid Token";
+        return TreePhase.this;
+      }
+
+
+      public void release() {
+        assert avlFileToken != null : "Invalid Token";
+        avlFileToken.release();
+        avlFileToken = null;
+      }
+
+    }
+
+
+    /**
+     * An internal representation of the data structures as a single Tuples.
+     * It would be nice to have this in an external class, but it is intimately tied
+     * into the current phase and the data pool itself.
+     */
+    private class GNodeTuplesImpl implements Tuples {
+
+      private static final int INVALID_CARDINALITY = -1;
+
+      /** A cache for the calculated row cardinality. */
+      private int rowCardinality = INVALID_CARDINALITY;
+
+      /**
+       * The low value of the range (inclusive) or null to indicate the lowest possible value
+       * within the type defined by the typeCategory and typeId fields.
+       */
+      private SPObject lowValue;
+
+      /**
+       * The high value of the range (exclusive) or null to indicate the highest possible value
+       * within the type defined by the typeCategory and typeId fields.
+       */
+      private SPObject highValue;
+
+      /** The first index node in the range (inclusive) or null to indicate an empty Tuples. */
+      private AVLNode lowAVLNode;
+
+      /**
+       * The last index node in the range (exclusive) or Block.INVALID_BLOCK_ID to indicate all
+       * nodes following lowAVLNode in the index.
+       */
+      private long highAVLNodeId;
+
+      /** The current node. */
+      private AVLNode avlNode = null;
+
+      /** Maintains a hold on the phase of the structure being accessed. */
+      AVLFile.Phase.Token avlFileToken = null;
+
+      /** The number of nodes. */
+      private long nrGNodes;
+
+      /** This is set to true once the number of nodes is known. */
+      private boolean nrGNodesValid = false;
+
+      private boolean beforeFirst = false;
+
+      private long[] prefix = null;
+
+      private boolean onPrefixNode = false;
+
+      private Variable[] variables = (Variable[])VARIABLES.clone();
+
+      /**
+       * Constructs a GNodeTuplesImpl that represents nodes in the AVLFile
+       * index that range from lowAVLNode up to but not including the node with
+       * ID highAVLNodeId.
+       * @param typeCategory The type category of the data this Tuples returns.
+       * @param typeId The type ID of the data being returned.
+       * @param lowValue The low end of the range (inclusive), or null for no lower bound within the type.
+       * @param highValue The high end of the range (exclusive), or null for no upper bound within the type.
+       * @param lowAVLNode the AVLNode that has the first graph node that is
+       * included in the Tuples.
+       * @param highAVLNodeId the ID of the AVLNode that has the first graph
+       * node that is not included in the Tuples.
+       */
+      GNodeTuplesImpl(
+          SPObject.TypeCategory typeCategory, int typeId,
+          SPObject lowValue, SPObject highValue,
+          AVLNode lowAVLNode, long highAVLNodeId
+      ) {
+
+        if (lowAVLNode != null && lowAVLNode.getId() == highAVLNodeId) {
+          // Low and High are equal - Empty.
+          lowAVLNode.release();
+          lowAVLNode = null;
+          highAVLNodeId = Block.INVALID_BLOCK_ID;
+        }
+
+        if (lowAVLNode == null) {
+          // Empty tuples.
+          typeCategory = null;
+          lowValue = null;
+          highValue = null;
+          if (highAVLNodeId != Block.INVALID_BLOCK_ID) {
+            if (logger.isDebugEnabled()) {
+              logger.debug("lowAVLNode is null but highAVLNodeId is not " +Block.INVALID_BLOCK_ID);
+            }
+            highAVLNodeId = Block.INVALID_BLOCK_ID;
+          }
+          nrGNodes = 0;
+          nrGNodesValid = true;
+        } else {
+          avlFileToken = avlFilePhase.use();
+        }
+
+        if (typeCategory != SPObject.TypeCategory.TYPED_LITERAL) typeId = SPObjectFactory.INVALID_TYPE_ID;
+
+        this.lowValue = lowValue;
+        this.highValue = highValue;
+        this.lowAVLNode = lowAVLNode;
+        this.highAVLNodeId = highAVLNodeId;
+      }
+
+
+      /**
+       * Get the gNode from the cursor at this point. This is read directly from the AVLNode.
+       * @see org.mulgara.store.tuples.Tuples#getColumnValue(int)
+       */
+      public long getColumnValue(int column) throws TuplesException {
+        if (column != 0) throw new TuplesException("Column index out of range: " + column);
+        // Handle the prefix.
+        if (onPrefixNode) return prefix[0];
+        if (avlNode == null) throw new TuplesException("No current row");
+        return avlNode.getPayloadLong(DataStruct.IDX_GRAPH_NODE);
+      }
+
+
+      /**
+       * @see #getColumnValue(int)
+       */
+      public long getRawColumnValue(int column) throws TuplesException {
+        return getColumnValue(column);
+      }
+
+
+      /**
+       * Returns the single variable name for this data.
+       */
+      public Variable[] getVariables() {
+        // Clone the variables array in case the caller changes the returned array.
+        return (Variable[])variables.clone();
+      }
+
+
+      /** @return 1, indicating the single column from this data. */
+      public int getNumberOfVariables() {
+        return 1;
+      }
+
+
+      /**
+       * Accumulates the size of this data and returns the number of nodes.
+       * This has scope for improvement if nodes ever store the number of their descendants.
+       * @see org.mulgara.store.tuples.Tuples#getRowCount()
+       */
+      public long getRowCount() throws TuplesException {
+        if (!nrGNodesValid) {
+          assert lowAVLNode != null;
+          AVLNode n = lowAVLNode;
+          n.incRefCount();
+          long count = 0;
+          while (n != null && (highAVLNodeId == Block.INVALID_BLOCK_ID || n.getId() != highAVLNodeId)) {
+            ++count;
+            n = n.getNextNode_R();
+          }
+          if (n != null) n.release();
+          nrGNodes = count;
+          nrGNodesValid = true;
+        }
+        return nrGNodes;
+      }
+
+
+      /** Delegates this work to {@link #getRowCount()} */
+      public long getRowUpperBound() throws TuplesException {
+        return getRowCount();
+      }
+
+      /**
+       * Return the cardinality of the tuples.
+       *
+       * @return <code>Cursor.ZERO</code> if the size of this tuples is 0,
+       *         <code>Cursor.ONE</code> if the size is 1,
+       *         <code>Cursor.MANY</code> if the size of this tuples is 2 or more.
+       * @throws TuplesException If there is an error accessing the underlying data.
+       */
+      public int getRowCardinality() throws TuplesException {
+        if (rowCardinality != INVALID_CARDINALITY) return rowCardinality;
+
+        long count = 0;
+        if (nrGNodesValid) {
+          count = nrGNodes;
+        } else {
+          assert lowAVLNode != null;
+          AVLNode n = lowAVLNode;
+          n.incRefCount();
+          while (count < 2 && n != null && (highAVLNodeId == Block.INVALID_BLOCK_ID || n.getId() != highAVLNodeId)) {
+            ++count;
+            n = n.getNextNode_R();
+          }
+          if (n != null) n.release();
+        }
+        rowCardinality = count == 0 ? Cursor.ZERO :
+                         count == 1 ? Cursor.ONE : Cursor.MANY;
+        return rowCardinality;
+      }
+
+
+      /** @see org.mulgara.store.tuples.Tuples#getColumnIndex(org.mulgara.query.Variable) */
+      public int getColumnIndex(Variable variable) throws TuplesException {
+        if (variable == null) throw new IllegalArgumentException("variable is null");
+        if (variable.equals(variables[0]))  return 0;
+        throw new TuplesException("variable doesn't match any column: " + variable);
+      }
+
+
+      /** @see org.mulgara.store.tuples.Tuples#isColumnEverUnbound(int) */
+      public boolean isColumnEverUnbound(int column) {
+        return false;
+      }
+
+
+      /** @see org.mulgara.store.tuples.Tuples#isMaterialized() */
+      public boolean isMaterialized() {
+        return true;
+      }
+
+
+      /** @see org.mulgara.store.tuples.Tuples#isUnconstrained() */
+      public boolean isUnconstrained() {
+        return false;
+      }
+
+
+      /** @see org.mulgara.store.tuples.Tuples#hasNoDuplicates() */
+      public boolean hasNoDuplicates() {
+        return true;
+      }
+
+
+      /** @see org.mulgara.store.tuples.Tuples#getComparator() */
+      public RowComparator getComparator() {
+        return null;  // Unsorted
+      }
+
+
+      /** @see org.mulgara.store.tuples.Tuples#getOperands() */
+      public java.util.List<Tuples> getOperands() {
+        return java.util.Collections.emptyList();
+      }
+
+
+      /** @see org.mulgara.store.tuples.Tuples#beforeFirst(long[], int) */
+      public void beforeFirst(long[] prefix, int suffixTruncation) throws TuplesException {
+        assert prefix != null;
+        if (prefix.length > 1) throw new TuplesException("prefix.length (" + prefix.length + ") > nrColumns (1)");
+        if (suffixTruncation != 0) throw new TuplesException("suffixTruncation not supported");
+
+        beforeFirst = true;
+        onPrefixNode = false;
+        this.prefix = prefix;
+        // check if this had been iterating, if so then forget where we were
+        if (avlNode != null) {
+          avlNode.release();
+          avlNode = null;
+        }
+      }
+
+
+      /** @see org.mulgara.query.Cursor#beforeFirst() */
+      public void beforeFirst() throws TuplesException {
+        beforeFirst(Tuples.NO_PREFIX, 0);
+      }
+
+
+      /** @see org.mulgara.store.tuples.Tuples#next() */
+      public boolean next() throws TuplesException {
+        if (beforeFirst) {
+          assert prefix != null;
+          assert avlNode == null;
+          assert !onPrefixNode;
+          beforeFirst = false;
+
+          // Handle the prefix.
+          if (prefix.length == 1) {
+            // If there are no nodes this Tuples can't contain the prefix node.
+            if (lowAVLNode == null) return false;
+
+            SPObject spObject;
+            try {
+              // FIXME check the type category and type node.
+              spObject = findSPObject(prefix[0]);
+            } catch (StringPoolException ex) {
+              throw new TuplesException("Exception while loading SPObject", ex);
+            }
+
+            // Check that the SPObject is within range.
+            onPrefixNode = spObject != null &&
+                           (lowValue == null || spObject.compareTo(lowValue) >= 0) &&
+                           (highValue == null || spObject.compareTo(highValue) < 0);
+            return onPrefixNode;
+          }
+
+          if (lowAVLNode != null) {
+            lowAVLNode.incRefCount();
+            avlNode = lowAVLNode;
+          }
+        } else if (avlNode != null) {
+          avlNode = avlNode.getNextNode_R();
+          if (avlNode != null) {
+            // Check if this is the highNode.
+            if (highAVLNodeId != Block.INVALID_BLOCK_ID && avlNode.getId() == highAVLNodeId ) {
+              avlNode.release();
+              avlNode = null;
+            }
+          }
+        }
+        onPrefixNode = false;
+        return avlNode != null;
+      }
+
+
+      /**
+       * Releases the resources reserved by having this tuples refer to the phase.
+       * @see org.mulgara.query.Cursor#close()
+       */
+      public void close() throws TuplesException {
+        if (lowAVLNode != null) {
+          if (avlNode != null) {
+            avlNode.release();
+            avlNode = null;
+          }
+          lowAVLNode.release();
+          lowAVLNode = null;
+          avlFileToken.release();
+          avlFileToken = null;
+        }
+      }
+
+
+      /** @see org.mulgara.store.tuples.Tuples#renameVariables(org.mulgara.query.Constraint) */
+      public void renameVariables(Constraint constraint) {
+        variables[0] = (Variable)constraint.getElement(0);
+      }
+
+
+      /** Duplicate this tuples and its resources. */
+      public Object clone() {
+        try {
+          GNodeTuplesImpl t = (GNodeTuplesImpl)super.clone();
+          t.variables = (Variable[])variables.clone();
+          if (t.lowAVLNode != null) {
+            t.lowAVLNode.incRefCount();
+            t.avlFileToken = avlFilePhase.use(); // Allocate a new token.
+            if (t.avlNode != null) t.avlNode.incRefCount();
+          }
+          return t;
+        } catch (CloneNotSupportedException e) {
+          throw new Error(getClass() + " doesn't support clone, which it must", e);
+        }
+      }
+
+
+      /**
+       * Iterates over this object to check whether it holds the same rows as the object it is compared with.
+       */
+      public boolean equals(Object o) {
+        boolean isEqual = false;
+
+        // Make sure it's not null
+        if (o != null) {
+          try {
+            // Try and cast the passed object - if not then they aren't equal.
+            Tuples testTuples = (Tuples) o;
+
+            // Ensure that the row count is the same
+            if (getRowCount() == testTuples.getRowCount()) {
+              // Ensure that the variable lists are equal
+              if (java.util.Arrays.asList(getVariables()).equals(
+                  java.util.Arrays.asList(testTuples.getVariables()))) {
+                // Clone tuples to be compared
+                Tuples t1 = (Tuples) clone();
+                Tuples t2 = (Tuples) testTuples.clone();
+
+                try {
+                  // Put them at the start.
+                  t1.beforeFirst();
+                  t2.beforeFirst();
+
+                  boolean finished = false;
+                  boolean tuplesEqual = true;
+
+                  // Repeat until there are no more rows or we find an unequal row.
+                  while (!finished) {
+                    // Assume that if t1 has next so does t2.
+                    finished = !t1.next();
+                    t2.next();
+
+                    // If we're not finished compare the row.
+                    if (!finished) {
+                      // Check if the elements in both rows are equal.
+                      for (int variableIndex = 0; variableIndex < t1.getNumberOfVariables(); variableIndex++) {
+                        // If they're not equal quit the loop and set tuplesEqual to false.
+                        if (t1.getColumnValue(variableIndex) != t2.getColumnValue(variableIndex)) {
+                          tuplesEqual = false;
+                          finished = true;
+                        }
+                      }
+                    }
+                  }
+
+                  isEqual = tuplesEqual;
+                } finally {
+                  t1.close();
+                  t2.close();
+                }
+              }
+            }
+          } catch (ClassCastException cce) {
+            // Not of the correct type return false.
+          } catch (TuplesException ex) {
+            throw new RuntimeException(ex.toString(), ex);
+          }
+        }
+
+        return isEqual;
+      }
+
+
+      /** @see java.lang.Object#toString() */
+      public String toString() {
+        return SimpleTuplesFormat.format(this);
+      }
+
+
+      /**
+       * Copied from AbstractTuples
+       */
+      public Annotation getAnnotation(Class<?> annotationClass) throws TuplesException {
+        return null;
+      }
+
+    }  // end of TreePhase.GNodeTuplesImpl
+    
+  }  // end of TreePhase
+
+}

Added: trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolImplTest.java
===================================================================
--- trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolImplTest.java	                        (rev 0)
+++ trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolImplTest.java	2008-08-23 00:43:11 UTC (rev 1152)
@@ -0,0 +1,865 @@
+/*
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * The Original Code is the Kowari Metadata Store.
+ *
+ * The Initial Developer of the Original Code is Plugged In Software Pty
+ * Ltd (http://www.pisoftware.com, mailto:info at pisoftware.com). Portions
+ * created by Plugged In Software Pty Ltd are Copyright (C) 2001,2002
+ * Plugged In Software Pty Ltd. All Rights Reserved.
+ *
+ * Contributor(s): N/A.
+ *
+ * [NOTE: The text of this Exhibit A may differ slightly from the text
+ * of the notices in the Source Code files of the Original Code. You
+ * should use the text of this Exhibit A rather than the text found in the
+ * Original Code Source Code for Your Modifications.]
+ *
+ */
+
+package org.mulgara.store.stringpool.xa11;
+
+import java.io.*;
+import java.net.*;
+
+// Java 2 standard packages
+import java.util.*;
+
+// third party packages
+import junit.framework.*;
+import org.apache.log4j.Logger;
+
+
+// locally written packages
+import org.mulgara.query.Cursor;
+import org.mulgara.query.TuplesException;
+import org.mulgara.query.rdf.*;
+import org.mulgara.store.*;
+import org.mulgara.store.nodepool.NodePool;
+import org.mulgara.store.stringpool.*;
+import org.mulgara.store.stringpool.xa.SPObjectFactoryImpl;
+import org.mulgara.store.tuples.Tuples;
+import org.mulgara.store.xa.SimpleXAResource;
+import org.mulgara.store.xa.XAStringPool;
+import org.mulgara.util.TempDir;
+
+
+/**
+ * Test case for {@link XA11StringPoolImpl}.
+ *
+ * @created 2008-08-20
+ *
+ * @author <a href="http://staff.pisoftware.com/pag">Paul Gearon</a>
+ * @licence <a href="{@docRoot}/../../LICENCE">Mozilla Public License v1.1</a>
+ */
+public class XA11StringPoolImplTest extends TestCase {
+
+  @SuppressWarnings("unused")
+  private final static Logger logger = Logger.getLogger(XA11StringPoolImplTest.class);
+
+  /** Persistent data to hand dataToNodes from one test to the next. */
+  private static Map<SPObject,Long> persistedDataToNodes;
+
+  /** Persistent data to hand nodesToData from one test to the next. */
+  private static Map<Long,SPObject> persistedNodesToData;
+
+  /** The data pool being tested. */
+  protected XAStringPool stringPool;
+
+  /** A record of the data stored in the data pool, mapped to the nodes. */
+  protected Map<SPObject,Long> dataToNodes;
+
+  /** A record of the nodes stored in the data pool, mapped to the data. */
+  protected Map<Long,SPObject> nodesToData;
+
+  /** The Factory building objects for this data pool. */
+  protected SPObjectFactory spoFactory;
+
+  /**
+   * Constructor.
+   * @param name The name of the test. Used for logging.
+   */
+  public XA11StringPoolImplTest(String name) {
+    super(name);
+    spoFactory = SPObjectFactoryImpl.getInstance();
+  }
+
+
+  /**
+   * Hook for test runner to obtain a test suite from.
+   * @return The test suite to run.
+   */
+  public static Test suite() {
+    //return new TestSuite(StringPoolImplTest.class);
+    TestSuite suite = new TestSuite();
+    suite.addTest(new XA11StringPoolImplTest("testPut"));
+    suite.addTest(new XA11StringPoolImplTest("testFindNode"));
+    suite.addTest(new XA11StringPoolImplTest("testFindString"));
+    suite.addTest(new XA11StringPoolImplTest("testPersistence1"));
+    suite.addTest(new XA11StringPoolImplTest("testPersistence2"));
+    suite.addTest(new XA11StringPoolImplTest("testNewPhase"));
+    suite.addTest(new XA11StringPoolImplTest("testAllTypes"));
+    suite.addTest(new XA11StringPoolImplTest("testFindNodes"));
+    return suite;
+  }
+
+
+  /**
+   * Default test runner.
+   * @param args The command line arguments
+   */
+  public static void main(String[] args) {
+    junit.textui.TestRunner.run(suite());
+  }
+
+
+  /**
+   * Test {@link StringPool#put}.
+   */
+  public void testPut() throws Exception {
+    ((SimpleXAResource) stringPool).clear();
+    simpleConfig();
+
+    try {
+      put("foo");
+      fail("Successfully added \"foo\", but object \"foo\" already exists");
+    } catch (StringPoolException e) { }
+
+    try {
+      put("bar");
+      fail("Successfully added 3:\"bar\", but object \"bar\" already exists");
+    } catch (StringPoolException e) { }
+
+
+    long node = stringPool.put(spoFactory.newSPString("quux"));
+    assertFalse(alreadyAssigned(node));
+    node = stringPool.put(spoFactory.newSPString(""));
+    assertFalse(alreadyAssigned(node));
+  }
+
+
+  /**
+   * Test {@link StringPool#findGNode}.
+   */
+  public void testFindNode() throws Exception {
+    ((SimpleXAResource) stringPool).clear();
+    simpleConfig();
+
+    testNodeRetrieval("foo");
+    testNodeRetrieval("bar");
+    assertEquals(NodePool.NONE, stringPool.findGNode(spoFactory.newSPString("quux")));
+  }
+
+  /**
+   * Test {@link StringPool#findSPObject}.
+   */
+  public void testFindString() throws Exception {
+    ((SimpleXAResource) stringPool).clear();
+    simpleConfig();
+
+    testObjRetrieval("foo");
+    testObjRetrieval("bar");
+    assertEquals(stringPool.findSPObject(10000), null);
+  }
+
+  /**
+   * Tests the persistence procedure.
+   */
+  public void testPersistence1() throws Exception {
+    ((SimpleXAResource) stringPool).clear();
+
+    put(spoFactory.newSPURI(XSD.DOUBLE_URI));
+    put(spoFactory.newSPURI(XSD.DATE_TIME_URI));
+
+    put("alpha");
+    put("bravo");
+    put("charlie");
+    put("delta");
+    put("echo");
+    put("foxtrot");
+
+    ((SimpleXAResource) stringPool).prepare();
+    ((SimpleXAResource) stringPool).commit();
+    saveState();
+  }
+
+
+  /**
+   * Tests reads after persistence. Persisted data was written in {@link #testPersistence1()}.
+   */
+  public void testPersistence2() throws Exception {
+    loadState();
+    int[] phases = ((SimpleXAResource) stringPool).recover();
+    assertEquals(1, phases.length);
+    assertEquals(1, phases[0]);
+    ((SimpleXAResource) stringPool).selectPhase(phases[0]);
+
+    testNodeRetrieval("alpha");
+    testNodeRetrieval("bravo");
+    testNodeRetrieval("charlie");
+    testNodeRetrieval("delta");
+    testNodeRetrieval("echo");
+    testNodeRetrieval("foxtrot");
+    assertEquals(NodePool.NONE, stringPool.findGNode(spoFactory.newSPString("golf")));
+
+    testObjRetrieval("alpha");
+    testObjRetrieval("bravo");
+    testObjRetrieval("charlie");
+    testObjRetrieval("delta");
+    testObjRetrieval("echo");
+    testObjRetrieval("foxtrot");
+    long lastNode = stringPool.findGNode(spoFactory.newSPString("foxtrot"));
+    assertNull(stringPool.findSPObject(lastNode + 16));  // 16 is the on-disk size for foxtrot
+
+    put("golf");
+
+    ((SimpleXAResource) stringPool).prepare();
+    ((SimpleXAResource) stringPool).commit();
+    saveState();
+  }
+
+
+  /**
+   * Tests read-only phases.
+   */
+  public void testNewPhase() throws Exception {
+    loadState();
+    int[] phases = ((SimpleXAResource) stringPool).recover();
+    assertEquals(1, phases.length);
+    assertEquals(2, phases[0]);
+    ((SimpleXAResource) stringPool).selectPhase(phases[0]);
+
+    testObjRetrieval("golf");
+    testNodeRetrieval("golf");
+
+    XA11StringPoolImpl.ReadOnlyStringPool roStringPool =
+        ((XA11StringPoolImpl)stringPool).new ReadOnlyStringPool();
+    roStringPool.refresh();
+
+    put("hotel");
+    put("india");
+    put("juliet");
+    put("kilo");
+    put("lima");
+
+    testNodeRetrieval(roStringPool, "alpha");
+    testNodeRetrieval(roStringPool, "bravo");
+    testNodeRetrieval(roStringPool, "charlie");
+    testNodeRetrieval(roStringPool, "delta");
+    testNodeRetrieval(roStringPool, "echo");
+    testNodeRetrieval(roStringPool, "foxtrot");
+    testNodeRetrieval(roStringPool, "golf");
+
+    testObjRetrieval(roStringPool, "alpha");
+    testObjRetrieval(roStringPool, "bravo");
+    testObjRetrieval(roStringPool, "charlie");
+    testObjRetrieval(roStringPool, "delta");
+    testObjRetrieval(roStringPool, "echo");
+    testObjRetrieval(roStringPool, "foxtrot");
+    testObjRetrieval(roStringPool, "golf");
+
+    // Now that this is write-once-read-many (WORM), data from newer phases is visible as well.
+    testNodeRetrieval(roStringPool, "hotel");
+    testNodeRetrieval(roStringPool, "india");
+    testNodeRetrieval(roStringPool, "juliet");
+    testNodeRetrieval(roStringPool, "kilo");
+    testNodeRetrieval(roStringPool, "lima");
+    assertEquals(NodePool.NONE, roStringPool.findGNode(spoFactory.newSPString("mike")));
+
+    ((SimpleXAResource) stringPool).prepare();
+    ((SimpleXAResource) stringPool).commit();
+
+    testNodeRetrieval(roStringPool, "alpha");
+    testNodeRetrieval(roStringPool, "bravo");
+    testNodeRetrieval(roStringPool, "charlie");
+    testNodeRetrieval(roStringPool, "delta");
+    testNodeRetrieval(roStringPool, "echo");
+    testNodeRetrieval(roStringPool, "foxtrot");
+    testNodeRetrieval(roStringPool, "golf");
+    testNodeRetrieval(roStringPool, "hotel");
+    testNodeRetrieval(roStringPool, "india");
+    testNodeRetrieval(roStringPool, "juliet");
+    testNodeRetrieval(roStringPool, "kilo");
+    testNodeRetrieval(roStringPool, "lima");
+
+    testObjRetrieval(roStringPool, "alpha");
+    testObjRetrieval(roStringPool, "bravo");
+    testObjRetrieval(roStringPool, "charlie");
+    testObjRetrieval(roStringPool, "delta");
+    testObjRetrieval(roStringPool, "echo");
+    testObjRetrieval(roStringPool, "foxtrot");
+    testObjRetrieval(roStringPool, "golf");
+    testObjRetrieval(roStringPool, "hotel");
+    testObjRetrieval(roStringPool, "india");
+    testObjRetrieval(roStringPool, "juliet");
+    testObjRetrieval(roStringPool, "kilo");
+    testObjRetrieval(roStringPool, "lima");
+
+    roStringPool.refresh();
+
+    testNodeRetrieval(roStringPool, "alpha");
+    testNodeRetrieval(roStringPool, "bravo");
+    testNodeRetrieval(roStringPool, "charlie");
+    testNodeRetrieval(roStringPool, "delta");
+    testNodeRetrieval(roStringPool, "echo");
+    testNodeRetrieval(roStringPool, "foxtrot");
+    testNodeRetrieval(roStringPool, "golf");
+    testNodeRetrieval(roStringPool, "hotel");
+    testNodeRetrieval(roStringPool, "india");
+    testNodeRetrieval(roStringPool, "juliet");
+    testNodeRetrieval(roStringPool, "kilo");
+    testNodeRetrieval(roStringPool, "lima");
+
+    testObjRetrieval(roStringPool, "alpha");
+    testObjRetrieval(roStringPool, "bravo");
+    testObjRetrieval(roStringPool, "charlie");
+    testObjRetrieval(roStringPool, "delta");
+    testObjRetrieval(roStringPool, "echo");
+    testObjRetrieval(roStringPool, "foxtrot");
+    testObjRetrieval(roStringPool, "golf");
+    testObjRetrieval(roStringPool, "hotel");
+    testObjRetrieval(roStringPool, "india");
+    testObjRetrieval(roStringPool, "juliet");
+    testObjRetrieval(roStringPool, "kilo");
+    testObjRetrieval(roStringPool, "lima");
+
+  }
+
+
+  /**
+   * Tests storage and retrieval of several data types
+   */
+  public void testAllTypes() throws Exception {
+    ((SimpleXAResource) stringPool).clear();
+
+    put(spoFactory.newSPURI(XSD.DOUBLE_URI));
+    put(spoFactory.newSPURI(XSD.DATE_TIME_URI));
+
+    assertTrue(stringPool.remove(10013));
+    assertTrue(stringPool.remove(10015));
+
+    put(spoFactory.newSPString("alpha"));
+    put(spoFactory.newSPString("bravo"));
+    put(spoFactory.newSPURI(new URI("http://charlie/")));
+    put(spoFactory.newSPURI(new URI("http://delta/")));
+    put(spoFactory.newSPDouble(42));
+    put(spoFactory.newSPDouble(123));
+    put(spoFactory.newSPTypedLiteral("1966-09-18T15:00:00", XSD.DATE_TIME_URI));
+    put(spoFactory.newSPTypedLiteral("1971-12-20T00:20:00", XSD.DATE_TIME_URI));
+
+    testNodeRetrieval(spoFactory.newSPString("alpha"));
+    testNodeRetrieval(spoFactory.newSPString("bravo"));
+    testNodeRetrieval(spoFactory.newSPURI(new URI("http://charlie/")));
+    testNodeRetrieval(spoFactory.newSPURI(new URI("http://delta/")));
+    testNodeRetrieval(spoFactory.newSPDouble(42));
+    testNodeRetrieval(spoFactory.newSPDouble(123));
+    testNodeRetrieval(spoFactory.newSPTypedLiteral("1966-09-18T15:00:00", XSD.DATE_TIME_URI));
+    testNodeRetrieval(spoFactory.newSPTypedLiteral("1971-12-20T00:20:00", XSD.DATE_TIME_URI));
+
+    // Make sure that URIs don't appear as strings.
+    assertEquals(NodePool.NONE, stringPool.findGNode(spoFactory.newSPString("http://charlie/")));
+    assertEquals(NodePool.NONE, stringPool.findGNode(spoFactory.newSPString("http://delta/")));
+
+    assertTrue(stringPool.remove(13));
+    assertTrue(stringPool.remove(15));
+
+    ((SimpleXAResource) stringPool).prepare();
+    ((SimpleXAResource) stringPool).commit();
+
+    testNodeRetrieval(spoFactory.newSPURI(new URI("http://charlie/")));
+    testNodeRetrieval(spoFactory.newSPURI(new URI("http://delta/")));
+    testNodeRetrieval(spoFactory.newSPDouble(42));
+
+    assertTrue(stringPool.remove(14));
+
+    testNodeRetrieval(spoFactory.newSPURI(new URI("http://charlie/")));
+    testNodeRetrieval(spoFactory.newSPURI(new URI("http://delta/")));
+    testNodeRetrieval(spoFactory.newSPDouble(42));
+
+  }
+
+
+  /**
+   * Tests range searching.
+   */
+  public void testFindNodes() throws Exception {
+    ((SimpleXAResource) stringPool).clear();
+
+    put(spoFactory.newSPURI(XSD.DOUBLE_URI));
+    put(spoFactory.newSPURI(XSD.DATE_TIME_URI));
+
+    // Populate the string pool.
+    put(spoFactory.newSPString("alpha"));
+    put(spoFactory.newSPString("bravo"));
+    put(spoFactory.newSPURI(new URI("http://charlie/")));
+    put(spoFactory.newSPURI(new URI("http://delta/")));
+    put(spoFactory.newSPDouble(42));
+    put(spoFactory.newSPDouble(123));
+    put(spoFactory.newSPTypedLiteral("1966-09-18T15:00:00", XSD.DATE_TIME_URI));
+    put(spoFactory.newSPTypedLiteral("1971-12-20T00:20:00", XSD.DATE_TIME_URI));
+    put(spoFactory.newSPString("foxtrot"));
+    put(spoFactory.newSPString("golf"));
+    put(spoFactory.newSPString("hotel"));
+    put(spoFactory.newSPString("charlie"));
+    put(spoFactory.newSPString("delta"));
+    put(spoFactory.newSPString("juliet"));
+    put(spoFactory.newSPString("kilo"));
+    put(spoFactory.newSPString("echo"));
+    put(spoFactory.newSPString("india"));
+    put(spoFactory.newSPString("lima"));
+    put(spoFactory.newSPDouble(3.14159265358979323846));
+    put(spoFactory.newSPDouble(-10));
+    put(spoFactory.newSPDouble(99999));
+    put(spoFactory.newSPDouble(1000));
+    put(spoFactory.newSPDouble(1000.001));
+    put(spoFactory.newSPDouble(321));
+    put(spoFactory.newSPDouble(1234));
+    put(spoFactory.newSPDouble(1111));
+    put(spoFactory.newSPDouble(1001));
+    put(spoFactory.newSPDouble(1002));
+    put(spoFactory.newSPDouble(1003));
+    put(spoFactory.newSPDouble(90));
+    put(spoFactory.newSPTypedLiteral("1977-01-01T00:00:00", XSD.DATE_TIME_URI));
+    put(spoFactory.newSPTypedLiteral("1968-07-05T00:00:00", XSD.DATE_TIME_URI));
+    put(spoFactory.newSPTypedLiteral("1981-01-10T00:00:00", XSD.DATE_TIME_URI));
+    put(spoFactory.newSPTypedLiteral("1999-09-09T00:00:00", XSD.DATE_TIME_URI));
+    put(spoFactory.newSPTypedLiteral("1977-01-01T00:00:01", XSD.DATE_TIME_URI));
+    put(spoFactory.newSPTypedLiteral("2000-01-01T00:00:00", XSD.DATE_TIME_URI));
+    put(spoFactory.newSPTypedLiteral("1999-12-31T23:59:59", XSD.DATE_TIME_URI));
+    put(spoFactory.newSPTypedLiteral("1977-01-01T00:00:02", XSD.DATE_TIME_URI));
+    put(spoFactory.newSPTypedLiteral("1970-01-01T00:00:00", XSD.DATE_TIME_URI));
+    put(spoFactory.newSPTypedLiteral("1969-12-31T23:59:59", XSD.DATE_TIME_URI));
+
+    List<Long> allStrings = new ArrayList<Long>();
+    allStrings.add(mappedVal("alpha"));
+    allStrings.add(mappedVal("bravo"));
+    allStrings.add(mappedVal("charlie"));
+    allStrings.add(mappedVal("delta"));
+    allStrings.add(mappedVal("echo"));
+    allStrings.add(mappedVal("foxtrot"));
+    allStrings.add(mappedVal("golf"));
+    allStrings.add(mappedVal("hotel"));
+    allStrings.add(mappedVal("india"));
+    allStrings.add(mappedVal("juliet"));
+    allStrings.add(mappedVal("kilo"));
+    allStrings.add(mappedVal("lima"));
+
+    // Get all SPString objects.
+    Tuples t = stringPool.findGNodes(
+        SPObject.TypeCategory.UNTYPED_LITERAL, null
+    );
+    assertEquals(allStrings, asList(t));
+    assertEquals(t.getRowCardinality(), Cursor.MANY);
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPString("delta"), true,
+        spoFactory.newSPString("hotel"), true
+    );
+    assertEquals(allStrings.subList(3, 8), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPString("delt"), true,
+        spoFactory.newSPString("hotels"), true
+    );
+    assertEquals(allStrings.subList(3, 8), asList(t));
+    t.close();
+
+    // SPString objects are case insensitive.
+    t = stringPool.findGNodes(
+        spoFactory.newSPString("DELT"), true,
+        spoFactory.newSPString("HOTELS"), true
+    );
+    assertEquals(allStrings.subList(3, 8), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPString("delt"), false,
+        spoFactory.newSPString("hotels"), false
+    );
+    assertEquals(allStrings.subList(3, 8), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPString("delta"), false,
+        spoFactory.newSPString("hotel"), true
+    );
+    assertEquals(allStrings.subList(4, 8), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPString("deltas"), true,
+        spoFactory.newSPString("hotel"), true
+    );
+    assertEquals(allStrings.subList(4, 8), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPString("delta"), true,
+        spoFactory.newSPString("hotel"), false
+    );
+    assertEquals(allStrings.subList(3, 7), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPString("delta"), false,
+        spoFactory.newSPString("hotel"), false
+    );
+    assertEquals(allStrings.subList(4, 7), asList(t));
+    t.close();
+
+    List<Long> allDoubles = new ArrayList<Long>();
+    allDoubles.add(mappedDbl(-10));
+    allDoubles.add(mappedDbl(3.14159265358979323846));
+    allDoubles.add(mappedDbl(42));
+    allDoubles.add(mappedDbl(90));
+    allDoubles.add(mappedDbl(123));
+    allDoubles.add(mappedDbl(321));
+    allDoubles.add(mappedDbl(1000));
+    allDoubles.add(mappedDbl(1000.001));
+    allDoubles.add(mappedDbl(1001));
+    allDoubles.add(mappedDbl(1002));
+    allDoubles.add(mappedDbl(1003));
+    allDoubles.add(mappedDbl(1111));
+    allDoubles.add(mappedDbl(1234));
+    allDoubles.add(mappedDbl(99999));
+
+    t = stringPool.findGNodes(
+        SPObject.TypeCategory.TYPED_LITERAL, XSD.DOUBLE_URI
+    );
+    assertEquals(allDoubles, asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPDouble(90), true,
+        spoFactory.newSPDouble(1003), true
+    );
+    assertEquals(allDoubles.subList(3, 11), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPDouble(89.99999), true,
+        spoFactory.newSPDouble(1003.00001), true
+    );
+    assertEquals(allDoubles.subList(3, 11), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPDouble(89.99999), false,
+        spoFactory.newSPDouble(1003.00001), false
+    );
+    assertEquals(allDoubles.subList(3, 11), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPDouble(90), false,
+        spoFactory.newSPDouble(1003), true
+    );
+    assertEquals(allDoubles.subList(4, 11), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPDouble(90.00001), true,
+        spoFactory.newSPDouble(1003), true
+    );
+    assertEquals(allDoubles.subList(4, 11), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPDouble(90), true,
+        spoFactory.newSPDouble(1003), false
+    );
+    assertEquals(allDoubles.subList(3, 10), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPDouble(90), false,
+        spoFactory.newSPDouble(1003), false
+    );
+    assertEquals(allDoubles.subList(4, 10), asList(t));
+    t.close();
+
+    List<Long> allDates = new ArrayList<Long>();
+    allDates.add(mappedDate("1966-09-18T15:00:00")); 
+    allDates.add(mappedDate("1968-07-05T00:00:00")); 
+    allDates.add(mappedDate("1969-12-31T23:59:59")); 
+    allDates.add(mappedDate("1970-01-01T00:00:00")); 
+    allDates.add(mappedDate("1971-12-20T00:20:00")); 
+    allDates.add(mappedDate("1977-01-01T00:00:00"));
+    allDates.add(mappedDate("1977-01-01T00:00:01")); 
+    allDates.add(mappedDate("1977-01-01T00:00:02"));
+    allDates.add(mappedDate("1981-01-10T00:00:00")); 
+    allDates.add(mappedDate("1999-09-09T00:00:00")); 
+    allDates.add(mappedDate("1999-12-31T23:59:59")); 
+    allDates.add(mappedDate("2000-01-01T00:00:00")); 
+
+    t = stringPool.findGNodes(
+        SPObject.TypeCategory.TYPED_LITERAL, XSD.DATE_TIME_URI
+    );
+    assertEquals(allDates, asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPTypedLiteral(
+            "1971-12-20T00:20:00", XSD.DATE_TIME_URI
+        ), true,
+        spoFactory.newSPTypedLiteral(
+            "1999-09-09T00:00:00", XSD.DATE_TIME_URI
+        ), true
+    );
+    assertEquals(allDates.subList(4, 10), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPTypedLiteral(
+            "1971-12-19T00:00:00", XSD.DATE_TIME_URI
+        ), true,
+        spoFactory.newSPTypedLiteral(
+            "1999-09-10T00:00:00", XSD.DATE_TIME_URI
+        ), true
+    );
+    assertEquals(allDates.subList(4, 10), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPTypedLiteral(
+            "1971-12-19T00:00:00", XSD.DATE_TIME_URI
+        ), false,
+        spoFactory.newSPTypedLiteral(
+            "1999-09-10T00:00:00", XSD.DATE_TIME_URI
+        ), false
+    );
+    assertEquals(allDates.subList(4, 10), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPTypedLiteral(
+            "1971-12-20T00:20:00", XSD.DATE_TIME_URI
+        ), false,
+        spoFactory.newSPTypedLiteral(
+            "1999-09-09T00:00:00", XSD.DATE_TIME_URI
+        ), true
+    );
+    assertEquals(allDates.subList(5, 10), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPTypedLiteral(
+            "1971-12-21T00:00:00", XSD.DATE_TIME_URI
+        ), true,
+        spoFactory.newSPTypedLiteral(
+            "1999-09-09T00:00:00", XSD.DATE_TIME_URI
+        ), true
+    );
+    assertEquals(allDates.subList(5, 10), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPTypedLiteral(
+            "1971-12-20T00:20:00", XSD.DATE_TIME_URI
+        ), true,
+        spoFactory.newSPTypedLiteral(
+            "1999-09-09T00:00:00", XSD.DATE_TIME_URI
+        ), false
+    );
+    assertEquals(allDates.subList(4, 9), asList(t));
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPTypedLiteral(
+            "1971-12-20T00:20:00", XSD.DATE_TIME_URI
+        ), false,
+        spoFactory.newSPTypedLiteral(
+            "1999-09-09T00:00:00", XSD.DATE_TIME_URI
+        ), false
+    );
+    assertEquals(allDates.subList(5, 9), asList(t));
+    t.close();
+
+    // Matching high value on last node in index (inclusive).
+    t = stringPool.findGNodes(
+        spoFactory.newSPTypedLiteral(
+            "1977-01-01T00:00:01", XSD.DATE_TIME_URI
+        ), false,
+        spoFactory.newSPTypedLiteral(
+            "2000-01-01T00:00:00", XSD.DATE_TIME_URI
+        ), true
+    );
+    assertEquals(allDates.subList(7, 12), asList(t));
+    t.close();
+
+    // Regression test for NPE when explicitly matching lowValue (exclusive) on
+    // last node in index.
+    t = stringPool.findGNodes(
+        spoFactory.newSPTypedLiteral(
+            "2000-01-01T00:00:00", XSD.DATE_TIME_URI
+        ), false, null, false
+    );
+    assertTrue(asList(t).isEmpty());
+    assertEquals(t.getRowCardinality(), Cursor.ZERO);
+    // Regression test.
+    t.beforeFirst(new long[] {mappedDbl(99999)}, 0);
+    assertTrue(!t.next());
+    t.close();
+
+    t = stringPool.findGNodes(
+        spoFactory.newSPString("alpha"), true,
+        spoFactory.newSPString("apple"), false
+    );
+    assertEquals(t.getRowCardinality(), Cursor.ONE);
+    t.beforeFirst();
+    assertTrue(t.next());
+    t.close();
+  }
+
+  /**
+   * Converts a single column Tuples to a List of Longs.
+   */
+  static List<Long> asList(Tuples t) throws TuplesException {
+    List<Long> l = new ArrayList<Long>();
+    long rowCount = t.getRowCount();
+
+    t.beforeFirst();
+    while (t.next()) l.add(new Long(t.getColumnValue(0)));
+
+    if (rowCount != l.size()) {
+      throw new AssertionError("Actual number of rows (" + l.size() + ") is not equal to reported row count (" + rowCount + ")");
+    }
+
+    return l;
+  }
+
+
+  void testNodeRetrieval(String s) throws StoreException {
+    testNodeRetrieval(spoFactory.newSPString(s));
+  }
+
+
+  void testNodeRetrieval(SPObject obj) throws StoreException {
+    testNodeRetrieval(stringPool, obj);
+  }
+
+
+  void testNodeRetrieval(XA11StringPoolImpl.ReadOnlyStringPool sp, String s) throws StoreException {
+    testNodeRetrieval(sp, spoFactory.newSPString(s));
+  }
+
+
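+  /** Asserts that the given string pool returns the node recorded for obj at put() time. */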
+  void testNodeRetrieval(XAStringPool sp, SPObject obj) throws StoreException {
+    assertEquals(dataToNodes.get(obj).longValue(), sp.findGNode(obj));
+  }
+
+
+  void testObjRetrieval(String s) throws StoreException {
+    testObjRetrieval(spoFactory.newSPString(s));
+  }
+
+
+  void testObjRetrieval(SPObject obj) throws StoreException {
+    testObjRetrieval(stringPool, obj);
+  }
+
+
+  void testObjRetrieval(XA11StringPoolImpl.ReadOnlyStringPool sp, String s) throws StoreException {
+    testObjRetrieval(sp, spoFactory.newSPString(s));
+  }
+
+
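+  /** Asserts that the given string pool returns the recorded object for the node mapped to obj. */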
+  void testObjRetrieval(XAStringPool sp, SPObject obj) throws StoreException {
+    long node = dataToNodes.get(obj);
+    assertEquals(nodesToData.get(node), sp.findSPObject(node));
+  }
+
+
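+  /** Indicates whether the given node has already been handed out by the string pool. */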
+  boolean alreadyAssigned(long node) {
+    return nodesToData.keySet().contains(node);
+  }
+
+
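+  /** Convenience wrapper: stores s as an SPString via put(SPObject). */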
+  long put(String s) throws StoreException {
+    return put(spoFactory.newSPString(s));
+  }
+
+
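+  /** Stores data in the pool and records the allocated node in the local maps. */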
+  long put(SPObject data) throws StoreException {
+    long node = stringPool.put(data);
+    dataToNodes.put(data, node);
+    nodesToData.put(node, data);
+    return node;
+  }
+
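+  /** Returns the node previously recorded for the string s. */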
+  long mappedVal(String s) {
+    return dataToNodes.get(spoFactory.newSPString(s));
+  }
+
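+  /** Returns the node previously recorded for the double d. */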
+  long mappedDbl(double d) {
+    return dataToNodes.get(spoFactory.newSPDouble(d));
+  }
+
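+  /** Returns the node previously recorded for the dateTime literal d. */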
+  long mappedDate(String d) {
+    return dataToNodes.get(spoFactory.newSPTypedLiteral(d, XSD.DATE_TIME_URI));
+  }
+
+  /** Method to set up the string pool before testing. */
+  protected void setUp() throws Exception {
+    boolean exceptionOccurred = true;
+    try {
+      // create a new string pool, building new files in the process.
+      stringPool = new XA11StringPoolImpl(
+          new String[] {TempDir.getTempDir().getPath() + File.separatorChar + "stringpooltest"}
+      );
+      dataToNodes = new HashMap<SPObject,Long>();
+      nodesToData = new HashMap<Long,SPObject>();
+      exceptionOccurred = false;
+    } finally {
+      if (exceptionOccurred) tearDown();
+    }
+  }
+
+
+  /** A setup method for some of the tests. */
+  void simpleConfig() throws Exception {
+    // Make sure the subclass initialization was done
+    if (stringPool == null) throw new IllegalStateException("Subclass didn't set stringPool field");
+
+    // Populate the string pool
+    put(spoFactory.newSPString("foo"));
+    put(spoFactory.newSPString("bar"));
+  }
+
+
+  /** Saves the internal state so a later test can use it. */
+  void saveState() {
+    persistedDataToNodes = dataToNodes;
+    persistedNodesToData = nodesToData;
+  }
+
+
+  /** Restores the internal state saved by an earlier test. */
+  void loadState() {
+    dataToNodes = persistedDataToNodes;
+    nodesToData = persistedNodesToData;
+  }
+
+
+  /** The teardown method for JUnit */
+  protected void tearDown() throws Exception {
+    if (stringPool != null) {
+      try {
+        stringPool.close();
+      } finally {
+        stringPool = null;
+      }
+    }
+  }
+
+}

Added: trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolLoadTest.java
===================================================================
--- trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolLoadTest.java	                        (rev 0)
+++ trunk/src/jar/store-stringpool-xa11/java/org/mulgara/store/stringpool/xa11/XA11StringPoolLoadTest.java	2008-08-23 00:43:11 UTC (rev 1152)
@@ -0,0 +1,171 @@
+/*
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * The Original Code is the Kowari Metadata Store.
+ *
+ * The Initial Developer of the Original Code is Plugged In Software Pty
+ * Ltd (http://www.pisoftware.com, mailto:info at pisoftware.com). Portions
+ * created by Plugged In Software Pty Ltd are Copyright (C) 2001,2002
+ * Plugged In Software Pty Ltd. All Rights Reserved.
+ *
+ * Contributor(s): N/A.
+ *
+ * [NOTE: The text of this Exhibit A may differ slightly from the text
+ * of the notices in the Source Code files of the Original Code. You
+ * should use the text of this Exhibit A rather than the text found in the
+ * Original Code Source Code for Your Modifications.]
+ *
+ */
+
+package org.mulgara.store.stringpool.xa11;
+
+import java.io.*;
+import java.util.ArrayList;
+
+// third party packages
+import junit.framework.*;
+import org.apache.log4j.Logger;
+
+// locally written packages
+import org.mulgara.store.stringpool.*;
+import org.mulgara.store.stringpool.xa.SPObjectFactoryImpl;
+import org.mulgara.util.TempDir;
+
+/**
+ * Load test for {@link XA11StringPoolImpl}.
+ *
+ * @created 2008-08-20
+ *
+ * @author <a href="http://staff.pisoftware.com/pag">Paul Gearon</a>
+ * @licence <a href="{@docRoot}/../../LICENCE">Mozilla Public License v1.1</a>
+ */
+public class XA11StringPoolLoadTest extends StringPoolLoadTest {
+
+  /** Logger. */
+  @SuppressWarnings("unused")
+  private final static Logger logger = Logger.getLogger(XA11StringPoolLoadTest.class);
+
+  /** The start of the filenames to use for the string pool. */
+  private static String dbFileName = TempDir.getTempDir().getPath() + File.separator + "xaloadtest";
+
+  /** Flag to indicate if the string pool files already exist. */
+  private static boolean dbExists = false;
+
+  /** Instance of a string pool */
+  private XA11StringPoolImpl stringPoolImpl = null;
+
+
+  /**
+   * Constructor.
+   * @param name The name of the test. Used for logging.
+   */
+  public XA11StringPoolLoadTest(String name) {
+    super(name);
+    spoFactory = SPObjectFactoryImpl.getInstance();
+  }
+
+
+  /**
+   * Hook for test runner to obtain a test suite from.
+   * @return The test suite to run.
+   */
+  public static Test suite() {
+    //return new TestSuite(XAStringPoolLoadTest.class);
+    TestSuite suite = new TestSuite();
+    suite.addTest(new XA11StringPoolLoadTest("testLoadStringPool"));
+    suite.addTest(new XA11StringPoolLoadTest("testFirstQuery"));
+    suite.addTest(new XA11StringPoolLoadTest("testSecondQuery"));
+    return suite;
+  }
+
+
+  /**
+   * Default test runner.
+   * @param args The command line arguments
+   */
+  public static void main(String[] args) {
+    junit.textui.TestRunner.run(suite());
+  }
+
+
+  /**
+   * Load the data.
+   */
+  public void testLoadStringPool() throws Exception {
+    // reduce noQueries if maxSize is smaller
+    noQueries = (maxSize < noQueries) ? maxSize : noQueries;
+
+    // create the array of test entries
+    testEntries = new ArrayList<StringPoolTestEntry>();
+
+    String line = reader.readLine();
+
+    int count = 1;
+    int gapSize = maxSize / noQueries;
+
+    long start = System.currentTimeMillis();
+    while ((line != null) && (count < maxSize)) {
+      SPObject spObject = spoFactory.newSPString(line);
+      long node = stringPool.put(spObject);
+      if (node > Integer.MAX_VALUE) throw new IllegalStateException("Unable to load data: node value does not fit into an integer");
+      if ((count % gapSize) == 0) testEntries.add(new StringPoolTestEntry((int)node, spObject));
+      line = reader.readLine();
+      count++;
+    }
+    System.out.println(String.format("Loaded %d statements in %fsec", count, (System.currentTimeMillis() - start) / 1000.0));
+  }
+
+
+  /**
+   * Method to set up the string pool before testing.
+   */
+  protected void setUp() throws Exception {
+    // create a new string pool, building new files in the process.
+    stringPoolImpl = new XA11StringPoolImpl(new String[]{dbFileName});
+    stringPool = stringPoolImpl;
+
+    try {
+      if (dbExists) {
+        int[] phaseNumbers = stringPoolImpl.recover();
+        if (phaseNumbers.length > 0) stringPoolImpl.selectPhase(phaseNumbers[0]);
+        else stringPoolImpl.clear();
+      } else {
+        stringPoolImpl.clear();
+      }
+    } catch (Exception ex) {
+      super.tearDown();
+      throw ex;
+    }
+    super.setUp();
+  }
+
+
+  /**
+   * The teardown method for JUnit
+   */
+  protected void tearDown() throws Exception {
+    dbExists = true;
+    try {
+      if (stringPoolImpl != null) {
+        stringPoolImpl.prepare();
+        stringPoolImpl.commit();
+        stringPoolImpl.unmap();
+        if (System.getProperty("os.name").startsWith("Win")) {
+          // Needed on Windows, otherwise truncate() always fails for mapped files.
+          System.gc();
+          System.runFinalization();
+        }
+      }
+    } finally {
+      super.tearDown();
+    }
+  }
+}

Added: trunk/src/jar/store-stringpool-xa11/store-stringpool-xa11-build.properties
===================================================================
--- trunk/src/jar/store-stringpool-xa11/store-stringpool-xa11-build.properties	                        (rev 0)
+++ trunk/src/jar/store-stringpool-xa11/store-stringpool-xa11-build.properties	2008-08-23 00:43:11 UTC (rev 1152)
@@ -0,0 +1,14 @@
+#
+# Properties used by the Store-Stringpool XA 1.1 module
+
+# Module Name
+store-stringpool-xa11.name        = Store-stringpool-xa-1.1
+
+# General module properties
+store-stringpool-xa11.conf.dir      = ${conf.dir}
+store-stringpool-xa11.src.dir       = ${jar.src.dir}/store-stringpool-xa11
+store-stringpool-xa11.obj.dir       = ${jar.obj.dir}/store-stringpool-xa11
+store-stringpool-xa11.dist.dir      = ${bin.dir}
+store-stringpool-xa11.test.dir      = ${test.dir}/store-stringpool-xa11
+store-stringpool-xa11.jxtest.dir    = ${jxtest.dir}/store-stringpool-xa11
+store-stringpool-xa11.jar           = store-stringpool-xa11-base-${mulgara-version}.jar



