[Mulgara-svn] r916 - in projects/xa2/object-pool/src: data scheduler trie

andrae at mulgara.org
Tue May 13 08:46:57 UTC 2008


Author: andrae
Date: 2008-05-13 01:46:56 -0700 (Tue, 13 May 2008)
New Revision: 916

Modified:
   projects/xa2/object-pool/src/data/DataEntry.java
   projects/xa2/object-pool/src/data/DataFile.java
   projects/xa2/object-pool/src/scheduler/Block.java
   projects/xa2/object-pool/src/scheduler/FileHandle.java
   projects/xa2/object-pool/src/trie/ByteMap.java
   projects/xa2/object-pool/src/trie/DiskTrie.java
Log:
Progress towards integrating DiskTrie with the new scheduler.


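Most of this change replaces raw blocksize ints with log2 sizes (log2bs), so block
starts and offsets fall out of simple mask arithmetic. A stand-alone sketch of that
idiom (the helper names are illustrative, not Mulgara API):

    // Power-of-two block arithmetic as used throughout this commit:
    // block size is a power of 2, therefore the offset mask is size - 1.
    public class Log2Demo {
      static int blockOffset(long position, int log2bs) {
        return (int)(position & ((1L << log2bs) - 1));
      }

      static long blockStart(long position, int log2bs) {
        return position & ~((1L << log2bs) - 1);
      }

      public static void main(String[] args) {
        int log2bs = 13; // 8KB blocks
        long position = 100000;
        System.out.println(blockStart(position, log2bs));  // 98304
        System.out.println(blockOffset(position, log2bs)); // 1696
      }
    }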

Modified: projects/xa2/object-pool/src/data/DataEntry.java
===================================================================
--- projects/xa2/object-pool/src/data/DataEntry.java	2008-05-12 08:19:41 UTC (rev 915)
+++ projects/xa2/object-pool/src/data/DataEntry.java	2008-05-13 08:46:56 UTC (rev 916)
@@ -6,6 +6,7 @@
 package data;
 
 import java.io.IOException;
+import java.nio.BufferOverflowException;
 import java.nio.BufferUnderflowException;
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
@@ -13,8 +14,8 @@
 import java.util.HashMap;
 import java.util.Map;
 
-//import scheduler.Block;
-//import scheduler.FileHandle;
+import scheduler.Block;
+import scheduler.FileHandle;
 
 /**
  * Abstracts the concept of a data file entry.
@@ -26,13 +27,13 @@
 
   public abstract int dataSize();
 
-  public abstract void write(FileChannel chan) throws IOException;
+  public abstract long write(FileHandle handle) throws IOException;
   public abstract DataEntry canonicalize();
 
   public abstract DataEntry rewind();
-  public abstract ByteBuffer slice(int offset, int length);
-  public abstract byte get(int position) throws BufferUnderflowException;
-  public abstract byte get() throws BufferUnderflowException;
+  public abstract ByteBuffer slice(int offset, int length) throws IOException;
+  public abstract byte get(int position) throws IOException;
+  public abstract byte get() throws IOException;
 
   public int totalSize() {
     return HEADER_LENGTH + dataSize(); // VALUE + LENGTH + DATA
@@ -45,7 +46,12 @@
   public static DataEntry getEntry(byte[] data, long value) {
     return new MemoryBackedDataEntry(value, data, false);
   }
-/*
+
+  public static DataEntry getEntry(FileHandle handle, long position) throws IOException {
+    return new DiskBackedDataEntry(handle, position).canonicalize();
+  }
+
+
   protected static class DiskBackedDataEntry extends DataEntry {
     private FileHandle handle;
     private long position;
@@ -58,13 +64,15 @@
     private int currBlock;
     private int currPosition;
 
-    DiskBackedDataEntry(FileHandle handle, long position) {
+    DiskBackedDataEntry(FileHandle handle, long position) throws IOException {
       this.handle = handle;
       this.position = position;
 
+      int blocksize = 0x01 << handle.getLog2BlockSize();
+
       Block block = handle.readBlock(position);
       this.currBlock = 0;
-      this.currBuffer = currBlock.offset(position & (BLOCK_SIZE - 1));  // block size is a power of 2 therefore mask is size - 1.
+      this.currBuffer = block.offset((int)(position & (blocksize - 1)));
       this.currPosition = 0;
 
       this.value = currBuffer.getLong();
@@ -74,15 +82,14 @@
       // Set the buffer to the entire block for length calculations.
       currBuffer.limit(currBuffer.capacity());
       int remaining = length - currBuffer.remaining();
-      int totalBlocks = remaining / BLOCK_SIZE + (remaining % BLOCK_SIZE > 0 ? 1 : 0);
+      int totalBlocks = 1 + remaining / blocksize + (remaining % blocksize > 0 ? 1 : 0); // 1 + ceil; blocks[0] is already read.
       blocks = new Block[totalBlocks];
       blocks[0] = block;
       if (totalBlocks == 1) {
         // [mark,length] establishes the data covered by this entry.
         currBuffer.limit(length);
       }
-      // FIXME: We eventually want to submit the remaining blocks to the scheduler for speculative background
-      // fetch.
+      // FIXME: We eventually want to submit the remaining blocks to the scheduler for speculative background fetch.
     }
 
     /**
@@ -94,7 +101,7 @@
      * unnecessary copy.  However releasing a 32K-2M block in preference to a 20-30byte array is preferable if
      * we are doing a stabbing query.
      * Especially as a DiskBackedDataEntry requires 48-bytes anyway vs. 12 for a MemoryBackedDataEntry.
-     * /
+     */
     public DataEntry canonicalize() {
       if (blocks.length == 1) {
         byte[] data = new byte[currBuffer.reset().remaining()];
@@ -115,8 +122,9 @@
       } else {
         currBlock = 0;
         // block size is a power of 2 therefore mask is size - 1.
-        currBuffer = blocks[0].offset((position & (BLOCK_SIZE - 1)) + HEADER_LENGTH);
-        if (totalBlocks == 1) {
+        currBuffer = blocks[0].offset((int)(position & (blocks[0].getBlockSize() - 1)) + HEADER_LENGTH);
+        currBuffer.mark();
+        if (blocks.length == 1) {
           // [mark,length] establishes the data covered by this entry.
           // Note: This should never occur as we expect DataEntries to be canonicalized, which would result in
           // a 1 block entry being replaced by a MemoryBackedDataEntry.
@@ -138,19 +146,16 @@
       }
     }
 
-    public byte get(int off) {
-      if (off < 0) {
-        throw new BufferUnderflowException("Attempt to use -ve offset");
-      }
-      if (off >= length) {
-        throw new BufferOverflowException("Attempt to use offset > length");
-      }
+    public byte get(int off) throws IOException {
+      if (off < 0) throw new BufferUnderflowException();
+      if (off >= length) throw new BufferOverflowException();
 
-      currBlock = (off + HEADER_LENGTH) / BLOCK_SIZE;
+      int blocksize = (0x01 << handle.getLog2BlockSize());
+      currBlock = (off + HEADER_LENGTH) / blocksize;
       if (blocks[currBlock] == null) {
-        blocks[currBlock] = handle.readBlock(position + currBlock * BLOCKSIZE);
+        blocks[currBlock] = handle.readBlock(position + currBlock * blocksize);
       }
-      currBuffer = blocks[currBlock].offset(position & (BLOCK_SIZE - 1));  // block size is a power of 2 therefore mask is size - 1.
+      currBuffer = blocks[currBlock].offset((int)(position & (blocksize - 1)));  // block size is a power of 2 therefore mask is size - 1.
 
       // Allow for header.
       if (currBlock == 0) {
@@ -158,7 +163,7 @@
       }
       // Allow for partial final block.
       if (currBlock == blocks.length - 1) {
-        currBuffer.limit(length % BLOCK_SIZE + (position & (BLOCKSIZE - 1)));
+        currBuffer.limit((int)(length % blocksize + (position & (blocksize - 1))));
       }
 
       currPosition = off;
@@ -166,15 +171,17 @@
       return get();
     }
 
-    public void write(FileChannel chan) throws IOException {
-      handle.transferTo(chan, position, HEADER_LENGTH + length);
+    public long write(FileHandle handle) throws IOException {
+      this.handle.transferTo(handle, position, HEADER_LENGTH + length);
+
+      return position;
     }
 
     /**
      * FIXME: We really need to return our own ByteBuffer implementation here that exploits lazy loading.
      *
      * Note this is a really really inefficient implementation.
-     * /
+     */
     public ByteBuffer slice(int offset, int length) throws IOException {
       ByteBuffer buffer = ByteBuffer.allocate(length);
       buffer.put(get(offset));
@@ -185,7 +192,8 @@
       return buffer;
     }
   }
-*/
+
+
   protected static class MemoryBackedDataEntry extends DataEntry {
     private ByteBuffer data;
 
@@ -227,12 +235,12 @@
       return this;
     }
 
-    public void write(FileChannel chan) throws IOException {
+    public long write(FileHandle handle) throws IOException {
       ByteBuffer bb = ByteBuffer.allocate(HEADER_LENGTH);
       bb.clear();
       bb.putLong(value);
       bb.putInt(data.capacity());
-      chan.write(new ByteBuffer[] { (ByteBuffer)bb.flip(), (ByteBuffer)bb.duplicate().clear() });
+      return handle.writeBuffers(new ByteBuffer[] { (ByteBuffer)bb.flip(), (ByteBuffer)data.duplicate().clear() });
     }
 
     public ByteBuffer slice(int offset, int length) {

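The DiskBackedDataEntry constructor above counts how many blocks an entry spans: the
first block is always read, and any bytes that spill past it are divided into whole
blocks, rounding up. A toy version of that calculation (assumed numbers, not project
code):

    public class BlockCountDemo {
      // length: data bytes after the 12-byte header;
      // remainingInFirstBlock: bytes left in the block the entry starts in.
      static int totalBlocks(int length, int remainingInFirstBlock, int blocksize) {
        int remaining = length - remainingInFirstBlock;
        if (remaining <= 0) return 1; // entry fits in its first block
        return 1 + remaining / blocksize + (remaining % blocksize > 0 ? 1 : 0);
      }

      public static void main(String[] args) {
        System.out.println(totalBlocks(100, 8180, 8192));   // 1
        System.out.println(totalBlocks(10000, 8180, 8192)); // 2
      }
    }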
Modified: projects/xa2/object-pool/src/data/DataFile.java
===================================================================
--- projects/xa2/object-pool/src/data/DataFile.java	2008-05-12 08:19:41 UTC (rev 915)
+++ projects/xa2/object-pool/src/data/DataFile.java	2008-05-13 08:46:56 UTC (rev 916)
@@ -6,12 +6,10 @@
 package data;
 
 import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.nio.channels.FileChannel;
-import java.util.HashMap;
-import java.util.Map;
 
+import scheduler.FileHandle;
+import scheduler.IOScheduler;
+
 /**
  * Abstracts the concept of a file of sorted octet sequences.
  */
@@ -27,6 +25,10 @@
   }
 
   public DataEntry getEntry(long position) throws IOException {
-    return new DiskBackedDataEntry(handle, position).canonicalize();
+    return DataEntry.getEntry(handle, position);
   }
+
+  public long write(DataEntry entry) throws IOException {
+    return entry.write(handle);
+  }
 }

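DataFile.write() delegates to DataEntry.write(), which lays an entry down as an 8-byte
value, a 4-byte length, then the data (totalSize() = HEADER_LENGTH + dataSize()). A
self-contained sketch of that record format, not the committed code:

    import java.nio.ByteBuffer;

    public class EntryHeaderDemo {
      static final int HEADER_LENGTH = 8 + 4; // VALUE + LENGTH

      public static void main(String[] args) {
        byte[] data = { 1, 2, 3 };
        ByteBuffer bb = ByteBuffer.allocate(HEADER_LENGTH + data.length);
        bb.putLong(42L);        // VALUE
        bb.putInt(data.length); // LENGTH
        bb.put(data);           // DATA
        bb.flip();
        System.out.println(bb.getLong() + " " + bb.getInt()); // 42 3
      }
    }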
Modified: projects/xa2/object-pool/src/scheduler/Block.java
===================================================================
--- projects/xa2/object-pool/src/scheduler/Block.java	2008-05-12 08:19:41 UTC (rev 915)
+++ projects/xa2/object-pool/src/scheduler/Block.java	2008-05-13 08:46:56 UTC (rev 916)
@@ -5,24 +5,39 @@
  */
 package scheduler;
 
+import java.io.IOException;
+import java.nio.BufferUnderflowException;
+import java.nio.ByteBuffer;
+
 public class Block {
+  public static final long UNALLOCATED = -1;
+
   private FileHandle handle;
   private ByteBuffer buffer;
   private Long blockId;
 
-  public Block(FileHandle handle, int size) {
+  public Block(FileHandle handle, int log2bs) {
     this.handle = handle;
     this.blockId = null;
-    this.buffer.allocateDirect(size);
+    this.buffer = ByteBuffer.allocateDirect(0x01 << log2bs);
+    this.blockId = UNALLOCATED;
   }
 
+  public long write() throws IOException {
+    return handle.writeBlock(this);
+  }
+
   public FileHandle getHandle() {
     return handle;
   }
 
+  public int getBlockSize() {
+    return buffer.capacity();
+  }
+
   public Long getBlockId() {
-    if (blockId == null) {
-      throw new IllegalStateException("Attempt to obtain blockId for uninitialized block");
+    if (blockId == UNALLOCATED) throw new IllegalStateException("Attempt to obtain blockId for uninitialized block");
+
     return blockId;
   }
 
@@ -35,6 +50,38 @@
     return (ByteBuffer)(buffer.clear());
   }
 
+  public long getLong() throws BufferUnderflowException {
+    return buffer.getLong();
+  }
+
+  public int getInt() throws BufferUnderflowException {
+    return buffer.getInt();
+  }
+
+  public short getShort() throws BufferUnderflowException {
+    return buffer.getShort();
+  }
+
+  public byte getByte() throws BufferUnderflowException {
+    return buffer.get();
+  }
+
+  public void putLong(long l) {
+    buffer.putLong(l);
+  }
+
+  public void putInt(int i) {
+    buffer.putInt(i);
+  }
+
+  public void putShort(short s) {
+    buffer.putShort(s);
+  }
+
+  public void putByte(byte b) {
+    buffer.put(b);
+  }
+
+  public int position() {
+    return buffer.position();
+  }
+
+  public ByteBuffer offset(int position) {
+    return (ByteBuffer)((ByteBuffer)buffer.position(position)).slice();
+  }
+
   /**
    * Prepare the Block to be written to/from memory.
    * Sets the Buffer's position = 0; limit = size; and leaves the blockId unchanged.

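Block wraps a direct ByteBuffer behind typed accessors so callers never manipulate
buffer state directly, and replaces the old null blockId with an UNALLOCATED sentinel.
A condensed stand-alone analogue (not the scheduler.Block source):

    import java.nio.ByteBuffer;

    public class MiniBlock {
      static final long UNALLOCATED = -1;
      private final ByteBuffer buffer;
      private long blockId = UNALLOCATED;

      MiniBlock(int log2bs) {
        buffer = ByteBuffer.allocateDirect(1 << log2bs); // always a power of two
      }

      int getBlockSize() { return buffer.capacity(); }

      long getBlockId() {
        if (blockId == UNALLOCATED)
          throw new IllegalStateException("Attempt to obtain blockId for uninitialized block");
        return blockId;
      }

      // A view of the buffer from 'position' onward, as Block.offset() provides.
      ByteBuffer offset(int position) {
        ByteBuffer dup = buffer.duplicate();
        dup.position(position);
        return dup.slice();
      }

      public static void main(String[] args) {
        MiniBlock b = new MiniBlock(13);
        System.out.println(b.getBlockSize());          // 8192
        System.out.println(b.offset(100).remaining()); // 8092
      }
    }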
Modified: projects/xa2/object-pool/src/scheduler/FileHandle.java
===================================================================
--- projects/xa2/object-pool/src/scheduler/FileHandle.java	2008-05-12 08:19:41 UTC (rev 915)
+++ projects/xa2/object-pool/src/scheduler/FileHandle.java	2008-05-13 08:46:56 UTC (rev 916)
@@ -6,38 +6,45 @@
 package scheduler;
 
 import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
 import java.nio.channels.FileChannel;
 
 public class FileHandle {
   private File file;
   private FileChannel channel;
-  private int blocksize;
+  private int log2bs;
 
   private long seeks;
 
-  FileHandle(File file, FileChannel channel, int magic, int blocksize) {
+  FileHandle(File file, FileChannel channel, int magic, int log2bs) {
     this.file = file;
     this.channel = channel;
-    this.blocksize = blocksize;
+    this.log2bs = log2bs;
     this.seeks = 0;
   }
 
-  FileHandle(File file, FileInputStream stream, int magic, int blocksize) {
-    this(file, stream.getChannel(), magic, blocksize);
+  FileHandle(File file, FileOutputStream stream, int magic, int log2bs) throws IOException {
+    this(file, stream.getChannel(), magic, log2bs);
 
-    ByteBuffer bb = ByteBuffer.allocate(blocksize);
+    ByteBuffer bb = ByteBuffer.allocate(0x01 << log2bs);
     bb.putInt(magic);
-    bb.putInt(blocksize);
+    bb.putInt(log2bs);
     bb.position(0);
     bb.limit(bb.capacity());
 
     channel.write(bb);
   }
 
-  FileHandle(File file, FileOutputStream stream, int magic, int blocksize) {
-    this(file, stream.getChannel(), magic, blocksize);
+  FileHandle(File file, FileInputStream stream, int magic, int log2bs) throws IOException {
+    this(file, stream.getChannel(), magic, log2bs);
 
-    ByteBuffer bb = ByteBuffer.allocate(blocksize);
+    if (log2bs < 10) throw new IllegalArgumentException("Attempt to open file with blocksize < 1KB");
+
+    ByteBuffer bb = ByteBuffer.allocate(0x01 << log2bs);
     this.channel.read(bb);
 
     bb.clear();
@@ -54,8 +61,8 @@
     }
 
     int filebs = bb.getInt();
-    if (filebs != blocksize) {
-      throw new IllegalArgumentException("Attempt to read file(" + file + ") using incorrect blocksize: " + blocksize);
+    if (filebs != log2bs) {
+      throw new IllegalArgumentException("Attempt to read file(" + file + ") using incorrect log2bs: " + log2bs);
     }
   }
 
@@ -63,23 +70,49 @@
     return file;
   }
 
-  Block readBlock(Long blockId) {
+  public int getLog2BlockSize() {
+    return log2bs;
+  }
+
+  /**
+   * FIXME: This should be package scope.
+   */
+  public Block readBlock(Long blockId) throws IOException {
     long position = blockId.longValue();
     if (channel.position() != position) {
       seeks++;
       channel.position(position);
     }
 
-    channel.read(new Block().block.prepare(blockId));
+    Block block = new Block(this, log2bs);
+    channel.read(block.prepare(blockId));
 
     return block;
   }
 
-  Long writeBlock(Block block) {
-    Long position = new Long(channel.position());
+  long writeBlock(Block block) throws IOException {
+    long position = channel.position();
 
-    channel.write(block.prepare(blockId));
+    channel.write(block.prepare(new Long(position)));
 
     return position;
   }
+
+  /**
+   * FIXME: Should be package scope.
+   */
+  public long writeBuffers(ByteBuffer[] buffers) throws IOException {
+    long position = channel.position();
+
+    channel.write(buffers);
+
+    return position;
+  }
+
+  /**
+   * FIXME: Should be package scope.
+   */
+  public void transferTo(FileHandle handle, long position, int length) throws IOException {
+    channel.transferTo(position, length, handle.channel);
+  }
 }

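The header the output constructor writes (magic plus log2bs) is what lets the input
constructor validate a file on open; elsewhere in this code an INVERT_MAGIC check
detects a byte-swapped magic and flips the buffer's byte order rather than failing. A
sketch of that endianness check, with an assumed magic value:

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public class MagicDemo {
      static final int MAGIC = 0x00020001; // assumed value
      static final int INVERT_MAGIC = Integer.reverseBytes(MAGIC);

      static void checkMagic(ByteBuffer bb) {
        int magic = bb.getInt();
        if (magic == MAGIC) return;
        if (magic == INVERT_MAGIC) {
          // File was written on a platform of the opposite endianness.
          bb.order(bb.order() == ByteOrder.BIG_ENDIAN ? ByteOrder.LITTLE_ENDIAN
                                                      : ByteOrder.BIG_ENDIAN);
        } else {
          throw new IllegalArgumentException("Bad magic in index buffer: " + magic);
        }
      }

      public static void main(String[] args) {
        ByteBuffer bb = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN);
        bb.putInt(MAGIC);
        bb.flip();
        bb.order(ByteOrder.BIG_ENDIAN); // simulate opening on the other endianness
        checkMagic(bb);
        System.out.println(bb.order()); // LITTLE_ENDIAN after correction
      }
    }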
Modified: projects/xa2/object-pool/src/trie/ByteMap.java
===================================================================
--- projects/xa2/object-pool/src/trie/ByteMap.java	2008-05-12 08:19:41 UTC (rev 915)
+++ projects/xa2/object-pool/src/trie/ByteMap.java	2008-05-13 08:46:56 UTC (rev 916)
@@ -12,6 +12,7 @@
 import java.util.Map;
 import java.util.NoSuchElementException;
 
+import scheduler.Block;
 
 /**
  * Represents a map from a byte to data.
@@ -30,7 +31,7 @@
   }
 
   @SuppressWarnings("unchecked")
-  public ByteMap(ByteBuffer index, Map<Short, T> nodeMap) {
+  public ByteMap(Block index, Map<Short, T> nodeMap) {
     high = index.getShort();
     low = new short[ones(high)];
     short dataCount = 0;

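ByteMap sizes its low array with ones(high), a population count over the 16-bit high
bitmap; lookups then use the rank of a set bit as the index into low. A sketch of that
bitmap-rank trick using Integer.bitCount (the real ByteMap has more to it):

    public class PopcountDemo {
      public static void main(String[] args) {
        short high = 0x0025; // bits 0, 2 and 5 set
        int entries = Integer.bitCount(high & 0xFFFF);
        System.out.println(entries); // 3, so low = new short[3]

        // Rank of bit 5 = number of set bits below it = index into 'low'.
        int rank = Integer.bitCount((high & 0xFFFF) & ((1 << 5) - 1));
        System.out.println(rank); // 2
      }
    }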
Modified: projects/xa2/object-pool/src/trie/DiskTrie.java
===================================================================
--- projects/xa2/object-pool/src/trie/DiskTrie.java	2008-05-12 08:19:41 UTC (rev 915)
+++ projects/xa2/object-pool/src/trie/DiskTrie.java	2008-05-13 08:46:56 UTC (rev 916)
@@ -3,6 +3,7 @@
  * Author Andrae Muys
  * Date 22nd April 2008
  */
+package trie;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
@@ -11,13 +12,15 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import data.DataFile;
+
+import scheduler.Block;
+import scheduler.FileHandle;
+
 /**
- * Extends CompMemTrie to provide IO functionality.
+ * Extends MemoryTrie to provide IO functionality.
  *
  * Guarantees Trie will fit within a single block, refuses to accept insert() if it would make the
  * serialized size of the node greater than the blocksize.
  */
-public class CompBlockTrie extends CompMemTrie {
+public class DiskTrie extends MemoryTrie {
   @SuppressWarnings("serial")
   public static class InsufficientSpace extends Exception
       { public InsufficientSpace(String s) { super(s); } };
@@ -54,28 +57,27 @@
    *   2.00 billion entries by a single I2 index in 49GB.
    *   2.52 trillion entries by a single I3 index in 60TB.
    */ 
-  private int blockSize;
+  private Block indexBlock;
 
-  public CompBlockTrie(int blockSize) {
+  // FIXME: Will need to provide access to scheduler in order to provide a facility to support overflow of
+  // large data-objects.  Not sure what the cut-offs should be but probably in the vicinity of 128k-4MB for
+  // inline; 16MB-64MB for delete-marked bulk managed; and over that as indirected individual files.
+  public DiskTrie(Block indexBlock) {
     super();
-    assert blockSize > 1024;
-    assert blockSize <= 32*1024;
-    this.blockSize = blockSize;
+    if (indexBlock.getBlockSize() < 1024) throw new IllegalArgumentException("Attempt to use Block < 1024 bytes in DiskTrie");
+    if (indexBlock.getBlockSize() > 32*1024) throw new IllegalArgumentException("Attempt to use Block > 32KB in DiskTrie");
+    if (indexBlock.getBlockId() != Block.UNALLOCATED) throw new IllegalArgumentException("Attempt to reuse block in DiskTrie");
+
+    this.indexBlock = indexBlock;
-    this.space = (short)((blockSize - 8) / WORST_CASE_ENTRY_SIZE); // -8 leaves room for header info.
+    this.space = (short)((indexBlock.getBlockSize() - 8) / WORST_CASE_ENTRY_SIZE); // -8 leaves room for header info.
   }
 
-  public CompBlockTrie(ByteBuffer index, FileChannel data) throws IOException {
+  public DiskTrie(Block indexBlock, DataFile data) throws IOException {
     super();
-    index.rewind(); // Note sure if I should doing this here or delegating to the caller.
-    int magic = index.getInt();
-    if (magic != MAGIC) {
-      if (magic == INVERT_MAGIC) {
-        index.order(index.order().equals(ByteOrder.BIG_ENDIAN) ? ByteOrder.LITTLE_ENDIAN : ByteOrder.BIG_ENDIAN);
-      } else {
-        throw new IllegalArgumentException("Bad magic in index buffer: " + magic + ", MAGIC=" + MAGIC);
-      }
-    }
+    if (indexBlock.getBlockId() == Block.UNALLOCATED) throw new IllegalArgumentException("Attempt to read unallocated block in DiskTrie");
 
+    indexBlock.prepare();
+
     // I should be able to replace this with a stack.
     // The child->parent relationship is implicit in the ordering of the nodes in the file.
     // The only thing we need to know is when to stop reading nodes and that is provided by rootLocation.
@@ -87,19 +89,19 @@
     short rootLocation = -1;
     while (rootLocation == -1) {
-      short location = (short)index.position();
+      short location = (short)indexBlock.position();
-      byte type = index.get();
+      byte type = indexBlock.getByte();
       switch (type) {
         case TYPE_BRANCH_TERM:
-          nodeMap.put(location, readTrieBranch(index, true, nodeMap));
+          nodeMap.put(location, readTrieBranch(indexBlock, true, nodeMap));
           break;
         case TYPE_BRANCH_NOTERM:
-          nodeMap.put(location, readTrieBranch(index, false, nodeMap));
+          nodeMap.put(location, readTrieBranch(indexBlock, false, nodeMap));
           break;
         case TYPE_LEAF:
-          nodeMap.put(location, readTrieLeaf(index, data));
+          nodeMap.put(location, readTrieLeaf(indexBlock, data));
           break;
         case TYPE_ROOT_LOC:
-          index.get();
-          rootLocation = index.getShort();
+          indexBlock.getByte(); // Skip padding.
+          rootLocation = indexBlock.getShort();
           break;
         default:
@@ -131,34 +133,17 @@
     return true;
   }
 
-  public void write(ByteBuffer index, FileChannel data) throws InsufficientSpace, IOException {
-    if (index.remaining() < blockSize) {
-      throw new InsufficientSpace("Insufficient space remaining in buffer to write block");
-    }
-
+  public void write(DataFile data) throws InsufficientSpace, IOException {
-    // Temporarally set the limit to blocksize.
-    int limit = index.limit();
-    index.limit(index.position() + blockSize);
-
     if (root == null) {
       throw new IllegalStateException("Attempt to write empty trie");
     }
 
-    int indexreq = (root == null) ? 8 : totalIndexSize(root) + 4 + 4;  // + sizeof(MAGIC) + sizeof(root_loc_type + root_loc)
-    if (indexreq > index.remaining()) {
-      System.err.printf("Index-Req:%d ; remaining:%d ; capacity:%d ; limit:%d ; position:%d\n", indexreq,
-          index.remaining(), index.capacity(), index.limit(), index.position());
-      throw new InsufficientSpace("Attempt to write trie index to bytebuffer with insufficient space");
-    }
-    if (indexreq > 0x00010000) {
-      throw new InsufficientSpace("Attempt to write trie index larger than 64K");
-    }
-
     HashMap<TrieNode, Short> locationMap = new HashMap<TrieNode, Short>();
 
-    index.putInt(MAGIC);
+    indexBlock.putInt(MAGIC);
     if (root != null) {
-      writeTrieNode(root, index, data, locationMap);
+      writeTrieNode(root, indexBlock, data, locationMap);
     }
-    index.put(TYPE_ROOT_LOC);
-    index.put((byte)0xFF);
+    indexBlock.putByte(TYPE_ROOT_LOC);
+    indexBlock.putByte((byte)0xFF);
@@ -167,11 +152,6 @@
     } else {
-      index.putShort((short)0x0000);
+      indexBlock.putShort((short)0x0000);
     }
-
-    // Set the position to the block size to ensure whole blocks are written.
-    index.position(index.limit());
-    // Reset the limit to its initial value.
-    index.limit(limit);
   }
 
   private static Map<Class, TrieNodeWriter> writerMap = new HashMap<Class, TrieNodeWriter>();
@@ -197,7 +177,7 @@
   }
 
   private static class TrieBranchWriter implements TrieNodeWriter {
-    public void write(TrieNode node, ByteBuffer index, FileChannel data, Map<TrieNode, Short> locationMap) throws IOException {
+    public void write(TrieNode node, Block index, DataFile data, Map<TrieNode, Short> locationMap) throws IOException {
       TrieBranch branch = (TrieBranch)node;
       if (branch.term != null) {
         writeTrieNode(branch.term, index, data, locationMap);
@@ -209,7 +189,7 @@
       locationMap.put(branch, (short)index.position());
       
-      index.put((branch.term == null) ? TYPE_BRANCH_NOTERM : TYPE_BRANCH_TERM);
-      index.put((byte)0xFF);  // Padding to keep things short-aligned.
+      index.putByte((branch.term == null) ? TYPE_BRANCH_NOTERM : TYPE_BRANCH_TERM);
+      index.putByte((byte)0xFF);  // Padding to keep things short-aligned. 0xFF is more visible in a hexdump than 0x00.
       index.putInt(branch.offset);
       if (branch.term != null) {
         index.putShort(locationMap.get(branch.term));
@@ -222,11 +202,10 @@
   }
 
   private static class TrieLeafWriter implements TrieNodeWriter {
-    public void write(TrieNode node, ByteBuffer index, FileChannel data, Map<TrieNode, Short> locationMap) throws IOException {
+    public void write(TrieNode node, Block index, DataFile data, Map<TrieNode, Short> locationMap) throws IOException {
       TrieLeaf leaf = (TrieLeaf)node;
 
-      long keyLocation = data.position();
-      leaf.entry.write(data);
+      long keyLocation = data.write(leaf.entry);
 
       locationMap.put(leaf, (short)index.position());
-      index.put(TYPE_LEAF);
+      index.putByte(TYPE_LEAF);
@@ -235,10 +214,10 @@
     }
   }
 
-  private TrieBranch readTrieBranch(ByteBuffer index, boolean hasTerm, Map<Short, TrieNode> nodeMap) throws IOException {
+  private TrieBranch readTrieBranch(Block index, boolean hasTerm, Map<Short, TrieNode> nodeMap) throws IOException {
     TrieBranch branch = new TrieBranch();
 
-    index.get();  // skip padding.
+    index.getByte();  // skip padding.
     branch.offset = index.getInt();
     if (hasTerm) {
       branch.term = (TrieLeaf)nodeMap.get(index.getShort());
@@ -258,7 +237,7 @@
     return branch;
   }
 
-  private TrieLeaf readTrieLeaf(ByteBuffer index, DataFile dataFile) throws IOException {
+  private TrieLeaf readTrieLeaf(Block index, DataFile dataFile) throws IOException {
-    index.get();  // skip padding.
+    index.getByte();  // skip padding.
     long keyLocation = index.getLong();
 

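For reference, the index-block records DiskTrie reads and writes all start with a type
byte plus a 0xFF padding byte to keep the following shorts aligned; a leaf carries the
8-byte keyLocation of its entry in the data file, and a final TYPE_ROOT_LOC record
points at the root node. A sketch of that layout (type values assumed for
illustration):

    import java.nio.ByteBuffer;

    public class TrieRecordDemo {
      static final byte TYPE_LEAF = 3, TYPE_ROOT_LOC = 4; // assumed values

      public static void main(String[] args) {
        ByteBuffer index = ByteBuffer.allocate(32);

        short leafLocation = (short)index.position();
        index.put(TYPE_LEAF).put((byte)0xFF); // type + padding
        index.putLong(12345L);                // keyLocation in the data file

        index.put(TYPE_ROOT_LOC).put((byte)0xFF);
        index.putShort(leafLocation);         // where the root node lives

        System.out.println(index.position()); // 14 bytes written
      }
    }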


