[Mulgara-svn] r898 - projects/xa2/object-pool/src/trie
andrae at mulgara.org
Tue May 6 06:21:37 UTC 2008
Author: andrae
Date: 2008-05-05 23:21:36 -0700 (Mon, 05 May 2008)
New Revision: 898
Added:
projects/xa2/object-pool/src/trie/DiskTrie.java
projects/xa2/object-pool/src/trie/MemoryTrie.java
projects/xa2/object-pool/src/trie/OnesTable.java
Modified:
projects/xa2/object-pool/src/trie/ByteMap.java
Log:
Rearranging classes.
Modified: projects/xa2/object-pool/src/trie/ByteMap.java
===================================================================
--- projects/xa2/object-pool/src/trie/ByteMap.java 2008-05-06 05:00:19 UTC (rev 897)
+++ projects/xa2/object-pool/src/trie/ByteMap.java 2008-05-06 06:21:36 UTC (rev 898)
@@ -4,7 +4,7 @@
* Date 8th April 2008
*/
-import static gen.OnesTable.*;
+import static trie.OnesTable.*;
import java.nio.ByteBuffer;
import java.util.Iterator;
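
The import fix above points ByteMap at the relocated OnesTable. For orientation, the sketch below summarises the shape of the ByteMap API as it is exercised by the classes copied in the rest of this commit; the signatures are inferred from those call sites, not taken from ByteMap.java itself.

    // A summary sketch only - names and return types are inferred from usage, not the real source.
    public interface ByteMapSketch<T> extends Iterable<T> {
        void insert(byte b, T child);                  // map a branching byte to a child node
        T lookup(byte b);                              // null if no child is mapped to this byte
        int size();                                    // number of children
        int memSize();                                 // serialized size of this map within an index block
        void writeHeader(java.nio.ByteBuffer index);   // write the ByteMap-High/ByteMap-Low bitmap header
    }
    // ByteMap also provides a deserialising constructor, ByteMap(ByteBuffer, Map<Short, TrieNode>),
    // and exposes a data[] field that readTrieBranch() below uses to rebuild aLeaf.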
Copied: projects/xa2/object-pool/src/trie/DiskTrie.java (from rev 872, projects/xa2/object-pool/src/trie/CompBlockTrie.java)
===================================================================
--- projects/xa2/object-pool/src/trie/DiskTrie.java (rev 0)
+++ projects/xa2/object-pool/src/trie/DiskTrie.java 2008-05-06 06:21:36 UTC (rev 898)
@@ -0,0 +1,342 @@
+/*
+ * Copyright Topaz Foundation 2008
+ * Author Andrae Muys
+ * Date 22nd April 2008
+ */
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.channels.FileChannel;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Extends CompMemTrie to provide IO functionality.
+ *
+ * Guarantees the trie will fit within a single block; insert() is refused if accepting the entry would
+ * make the serialized size of the trie greater than the block size.
+ */
+public class CompBlockTrie extends CompMemTrie {
+ @SuppressWarnings("serial")
+ public static class InsufficientSpace extends Exception
+ { public InsufficientSpace(String s) { super(s); } };
+
+ static final byte TYPE_BRANCH_TERM = 0x01; // Indicates a trie branch with a termination entry.
+ static final byte TYPE_BRANCH_NOTERM = 0x02; // Indicates a trie branch without a termination entry.
+ static final byte TYPE_LEAF = 0x03; // Indicates a trie leaf.
+ static final byte TYPE_ROOT_LOC = 0x04; // Indicates the following short (after a pad byte) is the location of the root node.
+ public final static int MAGIC = 0x00020001;
+ public final static int INVERT_MAGIC = 0x01000200;
+
+ // Calculated as the worst case size per entry.
+ // Assumes a full leaf and branch per entry.
+ // Branch: 2-byte type-header
+ // 4-byte offset
+ // 2-byte ByteMap-High
+ // 4-byte ByteMap-Lows (2-entries).
+ // 4-byte Children Ptrs.
+ // Leaf: 2-byte type-header
+ // 8-byte offset.
+ // 26-bytes Total.
+ // Best case of course is 10-bytes leaf + 2-bytes children in branch.
+ private static short WORST_CASE_ENTRY_SIZE = 26;
+//private static short BEST_CASE_ENTRY_SIZE = 12;
+
+ /** Worst-case space left in trie measured in entries */
+ private short space;
+
+ /**
+ * The blocksize for this Block-Trie.
+ * Currently the maximum block size supported is 32K, which is worst-case:
+ * 1260 entries covered by a single I0 index in 32KB.
+ * 1.59 million entries by a single I1 index in 40MB.
+ * 2.00 billion entries by a single I2 index in 49GB.
+ * 2.52 trillion entries by a single I3 index in 60TB.
+ */
+ private int blockSize;
+
+ public CompBlockTrie(int blockSize) {
+ super();
+ assert blockSize > 1024;
+ assert blockSize <= 32*1024;
+ this.blockSize = blockSize;
+ this.space = (short)((blockSize - 8) / WORST_CASE_ENTRY_SIZE); // -8 leaves room for header info.
+ }
+
+ public CompBlockTrie(ByteBuffer index, FileChannel data) throws IOException {
+ super();
+ index.rewind(); // Not sure if I should be doing this here or delegating it to the caller.
+ int magic = index.getInt();
+ if (magic != MAGIC) {
+ if (magic == INVERT_MAGIC) {
+ index.order(index.order().equals(ByteOrder.BIG_ENDIAN) ? ByteOrder.LITTLE_ENDIAN : ByteOrder.BIG_ENDIAN);
+ } else {
+ throw new IllegalArgumentException("Bad magic in index buffer: " + magic + ", MAGIC=" + MAGIC);
+ }
+ }
+
+ // I should be able to replace this with a stack.
+ // The child->parent relationship is implicit in the ordering of the nodes in the file.
+ // The only thing we need to know is when to stop reading nodes and that is provided by rootLocation.
+ // If treated as a stack, once we have read the root the stack should contain only a single element:
+ // the root node.
+ // For now I'm hoping the HashMap will help with debugging - but it is wasteful of memory in the long run.
+ HashMap<Short, TrieNode> nodeMap = new HashMap<Short, TrieNode>();
+
+ short rootLocation = -1;
+ while (rootLocation == -1) {
+ short location = (short)index.position();
+ byte type = index.get();
+ switch (type) {
+ case TYPE_BRANCH_TERM:
+ nodeMap.put(location, readTrieBranch(index, true, nodeMap));
+ break;
+ case TYPE_BRANCH_NOTERM:
+ nodeMap.put(location, readTrieBranch(index, false, nodeMap));
+ break;
+ case TYPE_LEAF:
+ nodeMap.put(location, readTrieLeaf(index, data));
+ break;
+ case TYPE_ROOT_LOC:
+ index.get();
+ rootLocation = index.getShort();
+ break;
+ default:
+ throw new IllegalArgumentException("Invalid trie-node");
+ }
+ }
+
+ root = nodeMap.get(rootLocation);
+
+ this.space = 0; // If this trie is modified later the available space will be recalculated automatically.
+ }
+
+ public boolean insert(byte[] key, long value) {
+ // Space was calculated on a worst-case basis - if every entry really was worst-case the trie-block is
+ // now full, but if not we should recalculate the space actually available.
+ if (space == 0) {
+ int size = totalIndexSize(root);
+ space = (short)((blockSize - size - 8) / WORST_CASE_ENTRY_SIZE);
+ if (space < 0) {
+ throw new IllegalStateException("Overfilled CompBlockTrie");
+ } else if (space == 0) {
+ return false;
+ }
+ }
+
+ super.insert(key, value);
+ space--;
+
+ return true;
+ }
+
+ public void write(ByteBuffer index, FileChannel data) throws InsufficientSpace, IOException {
+ if (index.remaining() < blockSize) {
+ throw new InsufficientSpace("Insufficient space remaining in buffer to write block");
+ }
+
+ // Temporarily set the limit to the block size.
+ int limit = index.limit();
+ index.limit(index.position() + blockSize);
+
+ if (root == null) {
+ throw new IllegalStateException("Attempt to write empty trie");
+ }
+
+ int indexreq = (root == null) ? 8 : totalIndexSize(root) + 4 + 4; // + sizeof(MAGIC) + sizeof(root_loc_type + root_loc)
+ if (indexreq > index.remaining()) {
+ System.err.printf("Index-Req:%d ; remaining:%d ; capacity:%d ; limit:%d ; position:%d\n", indexreq,
+ index.remaining(), index.capacity(), index.limit(), index.position());
+ throw new InsufficientSpace("Attempt to write trie index to bytebuffer with insufficient space");
+ }
+ if (indexreq > 0x00010000) {
+ throw new InsufficientSpace("Attempt to write trie index larger than 64K");
+ }
+
+ HashMap<TrieNode, Short> locationMap = new HashMap<TrieNode, Short>();
+
+ index.putInt(MAGIC);
+ if (root != null) {
+ writeTrieNode(root, index, data, locationMap);
+ }
+ index.put(TYPE_ROOT_LOC);
+ index.put((byte)0xFF);
+ if (root != null) {
+ index.putShort(locationMap.get(root));
+ } else {
+ index.putShort((short)0x0000);
+ }
+
+ // Advance the position to the end of the block so that whole blocks are always written.
+ index.position(index.limit());
+ // Reset the limit to its initial value.
+ index.limit(limit);
+ }
+
+ private static Map<Class, TrieNodeWriter> writerMap = new HashMap<Class, TrieNodeWriter>();
+ static {
+ writerMap.put(TrieBranch.class, new TrieBranchWriter());
+ writerMap.put(TrieLeaf.class, new TrieLeafWriter());
+ }
+
+ protected static void writeTrieNode(TrieNode node, ByteBuffer index, FileChannel data,
+ Map<TrieNode, Short> locationMap) throws IOException {
+ writerMap.get(node.getClass()).write(node, index, data, locationMap);
+ }
+
+ private static interface TrieNodeWriter {
+ /**
+ * This method *must* record in locationMap the position in the index buffer this node was written to.
+ * This allows parents to obtain a physical reference to this node in their write methods.
+ * The index buffer is the target of the Trie index itself.
+ * The data buffer is the target of the byte[]/value pairs stored in the leaves. We want to keep these
+ * separate for caching purposes.
+ */
+ public void write(TrieNode node, ByteBuffer index, FileChannel data, Map<TrieNode, Short> locationMap) throws IOException;
+ }
+
+ private static class TrieBranchWriter implements TrieNodeWriter {
+ public void write(TrieNode node, ByteBuffer index, FileChannel data, Map<TrieNode, Short> locationMap) throws IOException {
+ TrieBranch branch = (TrieBranch)node;
+ if (branch.term != null) {
+ writeTrieNode(branch.term, index, data, locationMap);
+ }
+ for (TrieNode child : branch.children) {
+ writeTrieNode(child, index, data, locationMap);
+ }
+
+ locationMap.put(branch, (short)index.position());
+
+ index.put((branch.term == null) ? TYPE_BRANCH_NOTERM : TYPE_BRANCH_TERM);
+ index.put((byte)0xFF); // Padding to keep things short-aligned.
+ index.putInt(branch.offset);
+ if (branch.term != null) {
+ index.putShort(locationMap.get(branch.term));
+ }
+ branch.children.writeHeader(index);
+ for (TrieNode child : branch.children) {
+ index.putShort(locationMap.get(child));
+ }
+ }
+ }
+
+ private static class TrieLeafWriter implements TrieNodeWriter {
+ public void write(TrieNode node, ByteBuffer index, FileChannel data, Map<TrieNode, Short> locationMap) throws IOException {
+ TrieLeaf leaf = (TrieLeaf)node;
+
+ ByteBuffer dataBuf = ByteBuffer.allocateDirect(8 + 4 + leaf.key.length);
+ long keyLocation = data.position();
+ dataBuf.putLong(leaf.value);
+ dataBuf.putInt(leaf.key.length);
+ dataBuf.put(leaf.key);
+ data.write((ByteBuffer)dataBuf.flip());
+
+ locationMap.put(leaf, (short)index.position());
+ index.put(TYPE_LEAF);
+ index.put((byte)0xFF); // Pad to 16-bit aligned.
+ index.putLong(keyLocation);
+ }
+ }
+
+ private TrieBranch readTrieBranch(ByteBuffer index, boolean hasTerm, Map<Short, TrieNode> nodeMap) throws IOException {
+ TrieBranch branch = new TrieBranch();
+
+ index.get(); // skip padding.
+ branch.offset = index.getInt();
+ if (hasTerm) {
+ branch.term = (TrieLeaf)nodeMap.get(index.getShort());
+ }
+ branch.children = new ByteMap<TrieNode>(index, nodeMap);
+ if (branch.term == null) {
+ // This two-step process is required to avoid the cast from Object[] to TrieNode[] inserted by Java
+ // generics, which triggers the obvious ClassCastException. By extracting explicitly to an Object[]
+ // first, then indexing to obtain an Object, then casting that Object to a TrieNode, the compiler
+ // doesn't insert the erroneous cast.
+ Object[] d = branch.children.data;
+ branch.aLeaf = ((TrieNode)(d[0])).aLeaf;
+ } else {
+ branch.aLeaf = branch.term;
+ }
+
+ return branch;
+ }
+
+ private TrieLeaf readTrieLeaf(ByteBuffer index, FileChannel data) throws IOException {
+ index.get();
+ long keyLocation = index.getLong();
+ data.position(keyLocation);
+ // FIXME: I need to cache these to reduce the allocation cost which is showing up under profiling
+ ByteBuffer bb = ByteBuffer.allocateDirect(8+4); // sizeof(value) + sizeof(length).
+ data.read(bb);
+ bb.flip();
+ long value = bb.getLong();
+ int length = bb.getInt();
+ byte[] key = new byte[length]; // FIXME: I eventually need to replace this with a lazy load.
+ data.read(ByteBuffer.wrap(key));
+
+ return new TrieLeaf(key, value, false);
+ }
+
+ private static Map<Class, TrieNodeSizer> sizerMap = new HashMap<Class, TrieNodeSizer>();
+ static {
+ sizerMap.put(TrieBranch.class, new TrieBranchSizer());
+ sizerMap.put(TrieLeaf.class, new TrieLeafSizer());
+ }
+
+ protected static int totalIndexSize(TrieNode node) {
+ return sizerMap.get(node.getClass()).indexSize(node);
+ }
+
+ protected static int totalDataSize(TrieNode node) {
+ return sizerMap.get(node.getClass()).dataSize(node);
+ }
+
+ private static interface TrieNodeSizer {
+ public int indexSize(TrieNode node);
+ public int dataSize(TrieNode node);
+ }
+
+ private static class TrieBranchSizer implements TrieNodeSizer {
+ protected int memSize(TrieBranch branch) {
+ return 4 + 1 + 1 + branch.children.memSize() + (branch.term == null ? 0 : 2);
+ }
+
+ public int indexSize(TrieNode node) {
+ TrieBranch branch = (TrieBranch)node;
+ int total = memSize(branch);
+ if (branch.term != null) {
+ total += totalIndexSize(branch.term);
+ }
+ for (TrieNode child : branch.children) {
+ total += totalIndexSize(child);
+ }
+
+ return total;
+ }
+
+ public int dataSize(TrieNode node) {
+ TrieBranch branch = (TrieBranch)node;
+ int total = 0;
+ if (branch.term != null) {
+ total += totalDataSize(branch.term);
+ }
+ for (TrieNode child : branch.children) {
+ total += totalDataSize(child);
+ }
+
+ return total;
+ }
+ }
+
+ private static class TrieLeafSizer implements TrieNodeSizer {
+ public int indexSize(TrieNode node) {
+ return 1 + 1 + 8;
+ }
+
+ public int dataSize(TrieNode node) {
+ TrieLeaf leaf = (TrieLeaf)node;
+ return 8 + 4 + leaf.key.length;
+ }
+ }
+}
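
The write/read cycle above pairs a fixed-size index block with a separate data channel: child nodes are serialized before their parents, the root's position is recorded after a TYPE_ROOT_LOC marker, and leaf key/value pairs are appended to the data file. A minimal usage sketch follows; it is not part of this commit, it keeps the class name as copied here (CompBlockTrie), and the file names are hypothetical. ByteMap and OnesTable (with its OnesTable.dat table in the working directory) are assumed to be available.

    import java.io.RandomAccessFile;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    public class BlockTrieExample {
        public static void main(String[] args) throws Exception {
            int blockSize = 32 * 1024;
            FileChannel data = new RandomAccessFile("trie.dat", "rw").getChannel();  // hypothetical file
            ByteBuffer index = ByteBuffer.allocateDirect(blockSize);

            CompBlockTrie trie = new CompBlockTrie(blockSize);
            trie.insert("hello".getBytes("UTF-8"), 42L);  // returns false once the worst-case estimate fills the block
            trie.insert("help".getBytes("UTF-8"), 43L);

            trie.write(index, data);  // serializes the index nodes and appends the key/value pairs to the data channel

            // Re-read the block: the constructor rewinds the buffer, checks MAGIC and rebuilds the node graph.
            CompBlockTrie reloaded = new CompBlockTrie(index, data);
            System.out.println(reloaded.lookup("hello".getBytes("UTF-8")));  // prints 42
        }
    }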
Copied: projects/xa2/object-pool/src/trie/MemoryTrie.java (from rev 872, projects/xa2/object-pool/src/trie/CompMemTrie.java)
===================================================================
--- projects/xa2/object-pool/src/trie/MemoryTrie.java (rev 0)
+++ projects/xa2/object-pool/src/trie/MemoryTrie.java 2008-05-06 06:21:36 UTC (rev 898)
@@ -0,0 +1,255 @@
+/*
+ * Copyright Topaz Foundation 2008
+ * Author Andrae Muys
+ * Date 15th April 2008
+ */
+
+import java.util.Arrays;
+
+/**
+ * Implements an in-memory trie - uses ByteMaps to implement trie nodes.
+ */
+public class CompMemTrie {
+ @SuppressWarnings("serial")
+ public static class NotFound extends Exception {};
+ private static NotFound notfound = new NotFound();
+
+ protected TrieNode root;
+ public CompMemTrie() {
+ this.root = null;
+ }
+
+ public boolean insert(byte[] key, long value) {
+ if (root == null) {
+ root = new TrieLeaf(key, value);
+ } else {
+ if (!root.insert(key, value)) {
+ root = new TrieBranch(root, key, value);
+ }
+ }
+
+ return true;
+ }
+
+ public long lookup(byte[] key) throws NotFound {
+ boolean[] valid = new boolean[1];
+ long result = lookup(key, valid);
+ if (valid[0]) {
+ return result;
+ } else {
+ throw notfound;
+ }
+ }
+
+
+ public long lookup(byte[] key, boolean[] valid) {
+ if (root == null) {
+ valid[0] = false;
+ return 0;
+ } else {
+ return root.lookup(key, valid);
+ }
+ }
+
+ protected abstract static class TrieNode {
+ protected TrieLeaf aLeaf;
+
+ /**
+ * @return false if we need to insert key above this node.
+ */
+ public boolean insert(byte[] key, long value) {
+ return insert(new TrieLeaf(key, value), 0);
+ }
+
+ public long lookup(byte[] key, boolean[] valid) {
+ return lookup(key, 0, valid);
+ }
+
+ protected abstract boolean insert(TrieLeaf node, int parentLcd);
+ protected abstract long lookup(byte[] key, int parentLcd, boolean[] valid);
+
+ protected static boolean regionMatches(byte[] lhs, int lhsOff, byte[] rhs, int rhsOff, int len) {
+ if (lhsOff < 0 || rhsOff < 0) {
+ return false;
+ }
+ if (lhs.length < lhsOff + len || rhs.length < rhsOff + len) {
+ return false;
+ }
+ for (int i = 0; i < len; i++) {
+ if (lhs[lhsOff + i] != rhs[rhsOff + i]) return false;
+ }
+
+ return true;
+ }
+ }
+
+ protected static class TrieBranch extends TrieNode {
+ protected int offset;
+ protected ByteMap<TrieNode> children;
+ protected TrieLeaf term;
+
+ protected TrieBranch() {}
+
+ public TrieBranch(byte[] key, long value) {
+ this.children = new ByteMap<TrieNode>();
+ this.term = new TrieLeaf(key, value);
+ this.offset = term.key.length;
+ this.aLeaf = this.term;
+ }
+
+ public TrieBranch(TrieNode oldRoot, byte[] key, long value) {
+ this(oldRoot, new TrieLeaf(key, value));
+ }
+
+ private TrieBranch(TrieNode oldNode, TrieLeaf newNode) {
+ super();
+ assert oldNode != null;
+ assert newNode != null;
+
+ offset = 0;
+ children = new ByteMap<TrieNode>();
+ term = null;
+
+ // as every child node shares a common lcp, any child will suffice when preparing the new
+ // lcp/offset of the new node.
+ aLeaf = newNode;
+
+ byte[] lhs = oldNode.aLeaf.key;
+ byte[] rhs = newNode.key;
+
+ int i = 0;
+ while (i < lhs.length && i < rhs.length) {
+ if (lhs[i] != rhs[i]) {
+ offset = i;
+ children.insert(lhs[i], oldNode);
+ children.insert(rhs[i], newNode);
+
+ return; // Escape here.
+ }
+ i++;
+ }
+
+ if (i < lhs.length) {
+ offset = i;
+ children.insert(lhs[i], oldNode);
+ term = newNode;
+ } else if (i < rhs.length) {
+ if (oldNode instanceof TrieLeaf) {
+ offset = i;
+ children.insert(rhs[i], newNode);
+ term = (TrieLeaf)oldNode;
+ } else {
+ throw new IllegalStateException("Attempt to create new branch node with leaf > branch.");
+ }
+ } else {
+ throw new IllegalStateException("Attempt to create new branch node with equal children");
+ }
+ }
+
+ protected boolean insert(TrieLeaf node, int parentLcp) {
+ if (!regionMatches(aLeaf.key, parentLcp, node.key, parentLcp, offset - parentLcp)) {
+ return false;
+ } else {
+ // new node matches the lcp of this node.
+ if (node.key.length == offset) {
+ // new node is expected to terminate here.
+ if (term == null) {
+ term = node;
+ } else {
+ return term.insert(node, offset);
+ }
+ } else {
+ // new node is expected to terminate in one of this node's children.
+ TrieNode child = children.lookup(node.key[offset]);
+ if (child == null) {
+ // this is the first node to be inserted on this branching key.
+ children.insert(node.key[offset], node);
+ } else {
+ // there is an existing child node branching on this branching key.
+ if (!child.insert(node, offset)) {
+ children.insert(node.key[offset], new TrieBranch(child, node));
+ }
+ }
+ }
+
+ return true;
+ }
+ }
+
+ protected long lookup(byte[] key, int parentLcd, boolean[] valid) {
+ if (!regionMatches(aLeaf.key, parentLcd, key, parentLcd, offset - parentLcd)) {
+ //FIXME: I need to migrate this to the end of the search to avoid needing access to the key at each level.
+ valid[0] = false;
+ return 0;
+ } else {
+ // new node matches the lcp of this node.
+ TrieNode child;
+ if (key.length == offset) {
+ // new node is expected to terminate here.
+ if (term != null) {
+ valid[0] = true;
+ return term.value;
+ } else {
+ valid[0] = false;
+ return 0;
+ }
+ } else {
+ // new node is expected to terminate in one of this node's children.
+ child = children.lookup(key[offset]);
+ if (child != null) {
+ return child.lookup(key, offset, valid);
+ } else {
+ valid[0] = false;
+ return 0;
+ }
+ }
+ }
+ }
+
+ public String toString() {
+ return "Trie-BRANCH[" + (term != null) + " on " + offset + " with " + children.size() + " and aLeaf[" + aLeaf + "]";
+ }
+ }
+
+ protected static class TrieLeaf extends TrieNode {
+ final byte[] key;
+ final long value;
+
+ protected TrieLeaf(byte[] key, long value, boolean copyKey) {
+ if (copyKey) {
+ this.key = new byte[key.length];
+ System.arraycopy(key, 0, this.key, 0, key.length);
+ } else {
+ this.key = key;
+ }
+ this.value = value;
+ this.aLeaf = this;
+ }
+
+ public TrieLeaf(byte[] key, long value) {
+ this(key, value, true);
+ }
+
+ protected boolean insert(TrieLeaf node, int parentLcp) {
+ if (key.length != node.key.length) {
+ return false;
+ } else if (!regionMatches(key, parentLcp, node.key, parentLcp, key.length - parentLcp)) {
+ return false;
+ } else if (value == node.value) {
+ return true; // Duplicate key/value pair.
+ } else {
+ throw new IllegalArgumentException("Attempt to insert multiple values for same key");
+ }
+ }
+
+ protected long lookup(byte[] key, int parentLcd, boolean[] valid) {
+ assert Arrays.equals(this.key, key);
+ valid[0] = true;
+ return value;
+ }
+
+ public String toString() {
+ return "Trie-LEAF: " + Arrays.toString(key) + " -> " + value;
+ }
+ }
+}
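
A small sketch of the insert/lookup behaviour above, not part of this commit: inserting two keys that diverge at byte 2 promotes the root leaf into a TrieBranch, and a key that fails the lcp check comes back with valid[0] == false. The class name is kept as copied here (CompMemTrie); ByteMap and its OnesTable.dat table are assumed to be available.

    public class MemTrieExample {
        public static void main(String[] args) throws CompMemTrie.NotFound {
            CompMemTrie trie = new CompMemTrie();
            trie.insert(new byte[] { 1, 2, 3 }, 100L);
            trie.insert(new byte[] { 1, 2, 4 }, 200L);  // root leaf is split into a TrieBranch with offset 2

            System.out.println(trie.lookup(new byte[] { 1, 2, 4 }));  // prints 200

            boolean[] valid = new boolean[1];
            long v = trie.lookup(new byte[] { 9 }, valid);  // lcp mismatch: valid[0] == false, v == 0
        }
    }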
Copied: projects/xa2/object-pool/src/trie/OnesTable.java (from rev 868, projects/xa2/object-pool/src/gen/OnesTable.java)
===================================================================
--- projects/xa2/object-pool/src/trie/OnesTable.java (rev 0)
+++ projects/xa2/object-pool/src/trie/OnesTable.java 2008-05-06 06:21:36 UTC (rev 898)
@@ -0,0 +1,58 @@
+/*
+ * Copyright Topaz Foundation 2008
+ * Author Andrae Muys
+ * Date 18th April 2008
+ */
+package trie;
+
+import java.io.FileInputStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+
+public class OnesTable {
+ public static final int HIGH = 0;
+ public static final int LOW = 1;
+
+ public static int hiNibbleAsBitInShort(byte b) {
+ return 0x00008000 >> ((b & 0x000000F0) >> 4);
+ }
+
+ public static int lowNibbleAsBitInShort(byte b) {
+ return 0x00008000 >> (b & 0x0000000F);
+ }
+
+ public static int ones(short index) {
+ return shortOnesTable[index & 0x0000FFFF];
+ }
+
+ public static int ones(int nibble, short index, byte key) {
+ // Conversions from signed to unsigned.
+ int idx = index & 0x0000FFFF;
+ int ky = key & 0x000000FF;
+
+ return onesTable[idx * 512 + 2 * ky + nibble];
+ }
+
+ // The number of ones in a 16-bit binary number indexed by 16-bit int.
+ // Note: This is a 64KB array. By using nibbles instead of bytes, and indexing by 8-bit unsigned instead
+ // of 16-bit, we could get away with as little as 127-bytes here.
+ private static final byte[] shortOnesTable;
+
+ // The number of ones in a 16-bit binary number relative to the high/low nibble of an unsigned byte
+ // represented as a single bit in a 16-bit unsigned int.
+ // Note: This is a 32MB array - this can be easily reduced to as little as 16K by moving bit manipulation
+ // operations from the initialisation below to the lookup functions above. Classic time/space tradeoff.
+ private static final byte[] onesTable;
+
+ static {
+ try {
+ FileChannel fc = new FileInputStream("OnesTable.dat").getChannel();
+ shortOnesTable = new byte[64*1024];
+ fc.read(ByteBuffer.wrap(shortOnesTable));
+ onesTable = new byte[64*1024 * 2*256];
+ fc.read(ByteBuffer.wrap(onesTable));
+ } catch (Exception e) {
+ throw new IllegalStateException("Failed to initialize OnesTable", e);
+ }
+ }
+}
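
A quick consistency check for the tables above, not part of this commit: ones(short) should agree with a direct popcount of the 16-bit value, and the nibble helpers map a nibble value n to bit 15-n of a short. It assumes OnesTable.dat is present in the working directory, as the static initialiser requires.

    import static trie.OnesTable.*;

    public class OnesTableCheck {
        public static void main(String[] args) {
            for (int i = 0; i <= 0xFFFF; i++) {
                if (ones((short)i) != Integer.bitCount(i)) {
                    throw new AssertionError("shortOnesTable mismatch at " + i);
                }
            }
            // High nibble 0x0 maps to the top bit of a short; low nibble 0xF maps to the bottom bit.
            if (hiNibbleAsBitInShort((byte)0x00) != 0x8000) throw new AssertionError();
            if (lowNibbleAsBitInShort((byte)0x0F) != 0x0001) throw new AssertionError();
        }
    }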