Index: /applications/editors/josm/plugins/imagerycache/build.xml
===================================================================
--- /applications/editors/josm/plugins/imagerycache/build.xml	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/build.xml	(revision 29484)
@@ -15,5 +15,5 @@
 
     <!-- enter the SVN commit message -->
-    <property name="commit.message" value="JOSM/ImageryCache: corrected cache file name, added imagerycache.debug option"/>
+    <property name="commit.message" value="JOSM/ImageryCache: updated MapDB (no more deadlocks, Java 1.6 compatible), less crashes, multiple-JOSM support"/>
     <!-- enter the *lowest* JOSM version this plugin is currently compatible with -->
     <property name="plugin.main.version" value="5779"/>
@@ -33,4 +33,7 @@
     <!-- ** include targets that all plugins have in common ** -->
     <import file="../build-common.xml"/>
-  
+    <target name="runmain" depends="install">
+        <java classname="${plugin.class}" classpath="${plugin.jar};${josm}" fork="true">
+        </java>
+    </target>
 </project>
Index: /applications/editors/josm/plugins/imagerycache/changelog
===================================================================
--- /applications/editors/josm/plugins/imagerycache/changelog	(revision 29484)
+++ /applications/editors/josm/plugins/imagerycache/changelog	(revision 29484)
@@ -0,0 +1,19 @@
+15.03.2013, 29363: Initial commit
+07.04.2013 : 
+* Updated MapDB from GitHub, version 06.04, occasional deadlock on start fixed 
+* Java 1.6 compatibility due to update of MapDB
+* Metadata is loaded from database correctly
+* Code refactoring, interfaces introduced
+* Stop background database operations on closing (much less errors on shutdown), needs MapDB one-line hack
+* Multiple JOSM instances now store cache data in multiple files (Mapnik, Mapnik_1, Mapnik_2, etc.), a Mapnik.lock file indicates that the file is currently in use
+* DAO object is a singleton now, one file per JVM will be created
+* Cache directory is created if needed
+
+
+
+        
+
+         
+           
+
+
Index: /applications/editors/josm/plugins/imagerycache/nbproject/project.xml
===================================================================
--- /applications/editors/josm/plugins/imagerycache/nbproject/project.xml	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/nbproject/project.xml	(revision 29484)
@@ -22,4 +22,14 @@
                     <encoding>UTF-8</encoding>
                 </source-folder>
+                <source-folder>
+                    <label>F:\Java\josm\core\src</label>
+                    <type>java</type>
+                    <location>F:\Java\josm\core\src</location>
+                    <encoding>UTF-8</encoding>
+                </source-folder>
+                <source-folder>
+                    <label>F:\Java\josm\core\src</label>
+                    <location>F:\Java\josm\core\src</location>
+                </source-folder>
             </folders>
             <ide-actions>
@@ -37,4 +47,7 @@
                     <target>compile</target>
                 </action>
+                <action name="test">
+                    <target>runmain</target>
+                </action>
             </ide-actions>
             <view>
@@ -43,4 +56,8 @@
                         <label>src</label>
                         <location>src</location>
+                    </source-folder>
+                    <source-folder style="packages">
+                        <label>F:\Java\josm\core\src</label>
+                        <location>F:\Java\josm\core\src</location>
                     </source-folder>
                     <source-file>
@@ -53,4 +70,5 @@
                     <ide-action name="clean"/>
                     <ide-action name="run"/>
+                    <ide-action name="test"/>
                 </context-menu>
             </view>
@@ -63,4 +81,8 @@
                 <source-level>1.6</source-level>
             </compilation-unit>
+            <compilation-unit>
+                <package-root>F:\Java\josm\core\src</package-root>
+                <source-level>1.6</source-level>
+            </compilation-unit>
         </java-data>
     </configuration>
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/AsyncWriteEngine.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/AsyncWriteEngine.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/AsyncWriteEngine.java	(revision 29484)
@@ -43,5 +43,5 @@
 
     protected static final Object DELETED = new Object();
-    protected final Locks.RecidLocks writeLocks = new Locks.LongHashMapRecidLocks();
+    protected final ReentrantLock[] writeLocks = Utils.newLocks(32);
 
     protected final ReentrantReadWriteLock commitLock;
@@ -77,12 +77,33 @@
                     if(!iter.moveToNext()){
                         //empty map, pause for a moment to give it chance to fill
-                        if(closeInProgress || (parentEngineWeakRef!=null && parentEngineWeakRef.get()==null) || writerFailedException!=null) return;
+                        if( (parentEngineWeakRef!=null && parentEngineWeakRef.get()==null) || writerFailedException!=null) return;
                         Thread.sleep(asyncFlushDelay);
-
+                        if(closeInProgress){
+                            //lock world and write everything
+                            Utils.lockAll(writeLocks);
+                            try{
+                                while(!items.isEmpty()){
+                                    iter = items.longMapIterator();
+                                    while(iter.moveToNext()){
+                                        long recid = iter.key();
+                                        Fun.Tuple2<Object,Serializer> value = iter.value();
+                                        if(value.a==DELETED){
+                                            AsyncWriteEngine.super.delete(recid, value.b);
+                                        }else{
+                                            AsyncWriteEngine.super.update(recid, value.a, value.b);
+                                        }
+                                        items.remove(recid, value);
+                                    }
+                                }
+                                return;
+                            }finally{
+                                Utils.unlockAll(writeLocks);
+                            }
+                        }
                     }else do{
                         //iterate over items and write them
                         long recid = iter.key();
 
-                        writeLocks.lock(recid);
+                        Utils.lock(writeLocks,recid);
                         try{
                             Fun.Tuple2<Object,Serializer> value = iter.value();
@@ -94,5 +115,5 @@
                             items.remove(recid, value);
                         }finally {
-                            writeLocks.unlock(recid);
+                            Utils.unlock(writeLocks, recid);
                         }
                     }while(iter.moveToNext());
@@ -124,12 +145,8 @@
     @Override
     public <A> long put(A value, Serializer<A> serializer) {
-        checkState();
-
-
         if(commitLock!=null) commitLock.readLock().lock();
         try{
-
             try {
-                Long recid = newRecids.take();
+                Long recid = newRecids.take(); //TODO possible deadlock while closing
                 update(recid, value, serializer);
                 return recid;
@@ -150,9 +167,9 @@
     @Override
     public <A> A get(long recid, Serializer<A> serializer) {
-        checkState();
         if(commitLock!=null) commitLock.readLock().lock();
         try{
-            writeLocks.lock(recid);
+            Utils.lock(writeLocks,recid);
             try{
+                checkState();
                 Fun.Tuple2<Object,Serializer> item = items.get(recid);
                 if(item!=null){
@@ -163,5 +180,5 @@
                 return super.get(recid, serializer);
             }finally{
-                writeLocks.unlock(recid);
+                Utils.unlock(writeLocks,recid);
             }
         }finally{
@@ -172,16 +189,17 @@
     @Override
     public <A> void update(long recid, A value, Serializer<A> serializer) {
-        checkState();
-        if(commitLock!=null) commitLock.readLock().lock();
-        try{
-
-            writeLocks.lock(recid);
+
+        if(commitLock!=null && serializer!=SerializerPojo.serializer) commitLock.readLock().lock();
+        try{
+
+            Utils.lock(writeLocks, recid);
             try{
+                checkState();
                 items.put(recid, new Fun.Tuple2(value,serializer));
             }finally{
-                writeLocks.unlock(recid);
+                Utils.unlock(writeLocks, recid);
             }
         }finally{
-            if(commitLock!=null) commitLock.readLock().unlock();
+            if(commitLock!=null&& serializer!=SerializerPojo.serializer) commitLock.readLock().unlock();
         }
 
@@ -190,7 +208,8 @@
     @Override
     public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer) {
-        checkState();
-        writeLocks.lock(recid);
-        try{
+        //TODO commit lock?
+        Utils.lock(writeLocks, recid);
+        try{
+            checkState();
             Fun.Tuple2<Object, Serializer> existing = items.get(recid);
             A oldValue = existing!=null? (A) existing.a : super.get(recid, serializer);
@@ -202,5 +221,5 @@
             }
         }finally{
-            writeLocks.unlock(recid);
+            Utils.unlock(writeLocks, recid);
 
         }
@@ -277,6 +296,6 @@
         try{
             while(!items.isEmpty()) LockSupport.parkNanos(100);
-
-            super.commit();
+            newRecids.clear();
+            super.rollback();
         }finally {
             commitLock.writeLock().unlock();
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/BTreeKeySerializer.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/BTreeKeySerializer.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/BTreeKeySerializer.java	(revision 29484)
@@ -83,5 +83,5 @@
         @Override
         public Object[] deserialize(DataInput in, int start, int end, int size) throws IOException {
-            Object[] ret = new Long[size];
+            Object[] ret = new Integer[size];
             int prev = 0 ;
             for(int i = start; i<end; i++){
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/BTreeMap.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/BTreeMap.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/BTreeMap.java	(revision 29484)
@@ -114,5 +114,5 @@
 
     /** holds node level locks*/
-    protected final Locks.RecidLocks nodeLocks = new Locks.LongHashMapRecidLocks();
+    protected final LongConcurrentHashMap<Thread> nodeLocks = new LongConcurrentHashMap<Thread>();
 
     /** maximal node size allowed in this BTree*/
@@ -138,4 +138,5 @@
     private final Values values = new Values(this);
     protected final Serializer defaultSerializer;
+    protected final Atomic.Long counter;
 
 
@@ -154,4 +155,5 @@
             out.writeBoolean(value.valsOutsideNodes);
             out.writeInt(value.maxNodeSize);
+            out.writeLong(value.counterRecid);
             defaultSerializer.serialize(out, value.keySerializer);
             defaultSerializer.serialize(out, value.valueSerializer);
@@ -168,4 +170,5 @@
             ret.valsOutsideNodes = in.readBoolean();
             ret.maxNodeSize = in.readInt();
+            ret.counterRecid = in.readLong();
             ret.keySerializer = (BTreeKeySerializer) defaultSerializer.deserialize(in, -1);
             ret.valueSerializer = (Serializer) defaultSerializer.deserialize(in, -1);
@@ -181,10 +184,8 @@
         boolean valsOutsideNodes;
         int maxNodeSize;
+        long counterRecid;
         BTreeKeySerializer keySerializer;
         Serializer valueSerializer;
         Comparator comparator;
-
-
-
     }
 
@@ -425,5 +426,5 @@
      * @param comparator Comparator to sort keys in this BTree, may be null.
      */
-    public BTreeMap(Engine engine, int maxNodeSize, boolean hasValues, boolean valsOutsideNodes,
+    public BTreeMap(Engine engine, int maxNodeSize, boolean hasValues, boolean valsOutsideNodes, boolean keepCounter,
                     Serializer defaultSerializer,
                     BTreeKeySerializer<K> keySerializer, Serializer<V> valueSerializer, Comparator<K> comparator) {
@@ -449,4 +450,5 @@
         this.valueSerializer = valueSerializer==null ? (Serializer<V>) defaultSerializer : valueSerializer;
 
+
         this.keySet = new KeySet(this, hasValues);
 
@@ -454,4 +456,13 @@
         long rootRecidVal = engine.put(emptyRoot, nodeSerializer);
         rootRecidRef = engine.put(rootRecidVal,Serializer.LONG_SERIALIZER);
+
+        long counterRecid = 0;
+        if(keepCounter){
+            counterRecid = engine.put(0L, Serializer.LONG_SERIALIZER);
+            this.counter = new Atomic.Long(engine,counterRecid);
+            Bind.size(this,counter);
+        }else{
+            this.counter = null;
+        }
 
         BTreeRoot r = new BTreeRoot();
@@ -463,5 +474,8 @@
         r.valueSerializer =  this.valueSerializer;
         r.comparator =  this.comparator;
+        r.counterRecid = counterRecid;
         this.treeRecid = engine.put(r, new BTreeRootSerializer(this.defaultSerializer));
+
+
     }
 
@@ -491,5 +505,13 @@
         this.valsOutsideNodes = r.valsOutsideNodes;
 
+
         this.keySet = new KeySet(this, hasValues);
+
+        if(r.counterRecid!=0){
+            counter = new Atomic.Long(engine,r.counterRecid);
+            Bind.size(this,counter);
+        }else{
+            this.counter = null;
+        }
     }
 
@@ -617,5 +639,5 @@
             boolean found;
             do{
-                nodeLocks.lock(current);
+                Utils.lock(nodeLocks, current);
                 found = true;
                 A = engine.get(current, nodeSerializer);
@@ -627,6 +649,6 @@
                     if(putOnlyIfAbsent){
                         //is not absent, so quit
-                        nodeLocks.unlock(current);
-                        nodeLocks.assertNoLocks();
+                        Utils.unlock(nodeLocks, current);
+                        Utils.assertNoLocks(nodeLocks);
                         V ret =  valExpand(oldVal);
                         notify(v,ret, value2);
@@ -643,6 +665,6 @@
                     engine.update(current, A, nodeSerializer);
                     //already in here
-                    nodeLocks.unlock(current);
-                    nodeLocks.assertNoLocks();
+                    Utils.unlock(nodeLocks, current);
+                    Utils.assertNoLocks(nodeLocks);
                     V ret =  valExpand(oldVal);
                     notify(v,ret, value2);
@@ -652,5 +674,5 @@
                 if(A.highKey() != null && comparator.compare(v, A.highKey())>0){
                     //follow link until necessary
-                    nodeLocks.unlock(current);
+                    Utils.unlock(nodeLocks, current);
                     found = false;
                     int pos2 = findChildren(v, A.keys());
@@ -686,6 +708,6 @@
                 }
 
-                nodeLocks.unlock(current);
-                nodeLocks.assertNoLocks();
+                Utils.unlock(nodeLocks, current);
+                Utils.assertNoLocks(nodeLocks);
                 notify(v,  null, value2);
                 return null;
@@ -734,5 +756,5 @@
 
                 if(!isRoot){
-                    nodeLocks.unlock(current);
+                    Utils.unlock(nodeLocks, current);
                     p = q;
                     v = (K) A.highKey();
@@ -757,6 +779,6 @@
 
                     //TODO update tree levels
-                    nodeLocks.unlock(current);
-                    nodeLocks.assertNoLocks();
+                    Utils.unlock(nodeLocks, current);
+                    Utils.assertNoLocks(nodeLocks);
                     notify(v, null, value2);
                     return null;
@@ -843,5 +865,5 @@
         while(true){
 
-            nodeLocks.lock(current);
+            Utils.lock(nodeLocks, current);
             A = engine.get(current, nodeSerializer);
             int pos = findChildren(key, A.keys());
@@ -852,10 +874,10 @@
                 oldVal = valExpand(oldVal);
                 if(value!=null && !value.equals(oldVal)){
-                    nodeLocks.unlock(current);
+                    Utils.unlock(nodeLocks, current);
                     return null;
                 }
                 //check for last node which was already deleted
                 if(pos == A.keys().length-1 && value == null){
-                    nodeLocks.unlock(current);
+                    Utils.unlock(nodeLocks, current);
                     return null;
                 }
@@ -874,9 +896,9 @@
                 A = new LeafNode(keys2, vals2, ((LeafNode)A).next);
                 engine.update(current, A, nodeSerializer);
-                nodeLocks.unlock(current);
+                Utils.unlock(nodeLocks, current);
                 notify((K)key, (V)oldVal, null);
                 return (V) oldVal;
             }else{
-                nodeLocks.unlock(current);
+                Utils.unlock(nodeLocks, current);
                 //follow link until necessary
                 if(A.highKey() != null && comparator.compare(key, A.highKey())>0){
@@ -960,4 +982,7 @@
     @Override
     public int size(){
+        if(counter!=null)
+            return (int) counter.get(); //TODO larger then MAX_INT
+
         long size = 0;
         BTreeIterator iter = new BTreeIterator();
@@ -994,5 +1019,5 @@
         }
 
-        nodeLocks.lock(current);
+        Utils.lock(nodeLocks, current);
         LeafNode leaf = (LeafNode) engine.get(current, nodeSerializer);
 
@@ -1000,6 +1025,6 @@
         while(pos==leaf.keys.length){
             //follow leaf link until necessary
-            nodeLocks.lock(leaf.next);
-            nodeLocks.unlock(current);
+            Utils.lock(nodeLocks, leaf.next);
+            Utils.unlock(nodeLocks, current);
             current = leaf.next;
             leaf = (LeafNode) engine.get(current, nodeSerializer);
@@ -1027,5 +1052,5 @@
             }
         }
-        nodeLocks.unlock(current);
+        Utils.unlock(nodeLocks, current);
         return ret;
     }
@@ -1043,5 +1068,5 @@
         }
 
-        nodeLocks.lock(current);
+        Utils.lock(nodeLocks, current);
         LeafNode leaf = (LeafNode) engine.get(current, nodeSerializer);
 
@@ -1049,6 +1074,6 @@
         while(pos==leaf.keys.length){
             //follow leaf link until necessary
-            nodeLocks.lock(leaf.next);
-            nodeLocks.unlock(current);
+            Utils.lock(nodeLocks, leaf.next);
+            Utils.unlock(nodeLocks, current);
             current = leaf.next;
             leaf = (LeafNode) engine.get(current, nodeSerializer);
@@ -1073,5 +1098,5 @@
 
         }
-        nodeLocks.unlock(current);
+        Utils.unlock(nodeLocks, current);
         return (V)ret;
     }
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/CacheHashTable.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/CacheHashTable.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/CacheHashTable.java	(revision 29484)
@@ -17,4 +17,6 @@
 package org.mapdb;
 
+import java.util.concurrent.locks.ReentrantLock;
+
 /**
  * Fixed size cache which uses hash table.
@@ -29,5 +31,5 @@
 
 
-    protected final Locks.RecidLocks locks = new Locks.SegmentedRecidLocks(16);
+    protected final ReentrantLock[] locks = Utils.newLocks(32);
 
     protected HashItem[] items;
@@ -63,8 +65,8 @@
         final int pos = position(recid);
         try{
-            locks.lock(pos);
+            Utils.lock(locks,pos);
             checkClosed(items)[position(recid)] = new HashItem(recid, value);
         }finally{
-            locks.unlock(pos);
+            Utils.unlock(locks,pos);
         }
         return recid;
@@ -81,5 +83,5 @@
 
         try{
-            locks.lock(pos);
+            Utils.lock(locks,pos);
             //not in cache, fetch and add
             final A value = getWrappedEngine().get(recid, serializer);
@@ -88,5 +90,5 @@
             return value;
         }finally{
-            locks.unlock(pos);
+            Utils.unlock(locks,pos);
         }
     }
@@ -100,9 +102,9 @@
         final int pos = position(recid);
         try{
-            locks.lock(pos);
+            Utils.lock(locks,pos);
             checkClosed(items)[pos] = new HashItem(recid, value);
             getWrappedEngine().update(recid, value, serializer);
         }finally {
-            locks.unlock(pos);
+            Utils.unlock(locks,pos);
         }
     }
@@ -113,5 +115,5 @@
         try{
             HashItem[] items2 = checkClosed(items);
-            locks.lock(pos);
+            Utils.lock(locks,pos);
             HashItem item = items2[pos];
             if(item!=null && item.key == recid){
@@ -131,5 +133,5 @@
             }
         }finally {
-            locks.unlock(pos);
+            Utils.unlock(locks,pos);
         }
     }
@@ -139,5 +141,5 @@
         final int pos = position(recid);
         try{
-            locks.lock(recid);
+            Utils.lock(locks,pos);
             getWrappedEngine().delete(recid,serializer);
             HashItem[] items2 = checkClosed(items);
@@ -146,5 +148,5 @@
             items[pos] = null;
         }finally {
-            locks.unlock(recid);
+            Utils.unlock(locks,pos);
         }
 
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/CacheLRU.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/CacheLRU.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/CacheLRU.java	(revision 29484)
@@ -1,3 +1,5 @@
 package org.mapdb;
+
+import java.util.concurrent.locks.ReentrantLock;
 
 /**
@@ -10,5 +12,5 @@
     protected LongMap<Object> cache;
 
-    protected final Locks.RecidLocks locks = new Locks.SegmentedRecidLocks(16);
+    protected final ReentrantLock[] locks = Utils.newLocks(32);
 
 
@@ -26,8 +28,8 @@
         long recid =  super.put(value, serializer);
         try{
-            locks.lock(recid);
+            Utils.lock(locks,recid);
             checkClosed(cache).put(recid, value);
         }finally {
-            locks.unlock(recid);
+            Utils.unlock(locks,recid);
         }
         return recid;
@@ -40,10 +42,10 @@
         if(ret!=null) return (A) ret;
         try{
-            locks.lock(recid);
+            Utils.lock(locks,recid);
             ret = super.get(recid, serializer);
             if(ret!=null) checkClosed(cache).put(recid, ret);
             return (A) ret;
         }finally {
-            locks.unlock(recid);
+            Utils.unlock(locks,recid);
         }
     }
@@ -52,9 +54,9 @@
     public <A> void update(long recid, A value, Serializer<A> serializer) {
         try{
-            locks.lock(recid);
+            Utils.lock(locks,recid);
             checkClosed(cache).put(recid, value);
             super.update(recid, value, serializer);
         }finally {
-            locks.unlock(recid);
+            Utils.unlock(locks,recid);
         }
     }
@@ -63,9 +65,9 @@
     public <A> void delete(long recid, Serializer<A> serializer){
         try{
-            locks.lock(recid);
+            Utils.lock(locks,recid);
             checkClosed(cache).remove(recid);
             super.delete(recid,serializer);
         }finally {
-            locks.unlock(recid);
+            Utils.unlock(locks,recid);
         }
     }
@@ -74,5 +76,5 @@
     public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer) {
         try{
-            locks.lock(recid);
+            Utils.lock(locks,recid);
             Engine engine = getWrappedEngine();
             LongMap cache2 = checkClosed(cache);
@@ -89,5 +91,5 @@
             }
         }finally {
-            locks.unlock(recid);
+            Utils.unlock(locks,recid);
         }
     }
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/CacheWeakSoftRef.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/CacheWeakSoftRef.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/CacheWeakSoftRef.java	(revision 29484)
@@ -20,4 +20,5 @@
 import java.lang.ref.SoftReference;
 import java.lang.ref.WeakReference;
+import java.util.concurrent.locks.ReentrantLock;
 
 /**
@@ -30,5 +31,5 @@
 
 
-    protected final Locks.RecidLocks locks = new Locks.LongHashMapRecidLocks();
+    protected final ReentrantLock[] locks = Utils.newLocks(32);
 
     protected interface CacheItem{
@@ -130,5 +131,5 @@
 
         try{
-            locks.lock(recid);
+            Utils.lock(locks,recid);
             Object value = getWrappedEngine().get(recid, serializer);
             if(value!=null) putItemIntoCache(recid, value);
@@ -136,5 +137,5 @@
             return (A) value;
         }finally{
-            locks.unlock(recid);
+            Utils.unlock(locks,recid);
         }
 
@@ -144,9 +145,9 @@
     public <A> void update(long recid, A value, Serializer<A> serializer) {
         try{
-            locks.lock(recid);
+            Utils.lock(locks,recid);
             putItemIntoCache(recid, value);
             getWrappedEngine().update(recid, value, serializer);
         }finally {
-            locks.unlock(recid);
+            Utils.unlock(locks,recid);
         }
     }
@@ -163,9 +164,9 @@
     public <A> void delete(long recid, Serializer<A> serializer){
         try{
-            locks.lock(recid);
+            Utils.lock(locks,recid);
             checkClosed(items).remove(recid);
             getWrappedEngine().delete(recid,serializer);
         }finally {
-            locks.unlock(recid);
+            Utils.unlock(locks,recid);
         }
 
@@ -175,5 +176,5 @@
     public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer) {
         try{
-            locks.lock(recid);
+            Utils.lock(locks,recid);
             CacheItem item = checkClosed(items).get(recid);
             Object oldValue = item==null? null: item.get() ;
@@ -190,5 +191,5 @@
             }
         }finally {
-            locks.unlock(recid);
+            Utils.unlock(locks,recid);
         }
     }
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/CompressLZF.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/CompressLZF.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/CompressLZF.java	(revision 29484)
@@ -53,5 +53,4 @@
 import java.io.DataOutput;
 import java.io.IOException;
-import java.io.Serializable;
 import java.util.Arrays;
 
@@ -297,5 +296,10 @@
         @Override
         public void serialize(DataOutput out, byte[] value) throws IOException {
-            if (value == null) return;
+            if (value == null|| value.length==0){
+                //in this case do not compress data, write 0 as indicator
+                Utils.packInt(out, 0);
+                out.write(value);
+                return;
+            }
 
             CompressLZF lzf = LZF.get();
@@ -341,31 +345,8 @@
      * Wraps existing serializer and compresses its input/output
      */
-    public static <E> Serializer<E> serializerCompressWrapper(Serializer<E> serializer) {
-        return new SerializerCompressWrapper<E>(serializer);
-    }
-
-
-    protected static class SerializerCompressWrapper<E> implements Serializer<E>, Serializable {
-        protected final Serializer<E> serializer;
-        public SerializerCompressWrapper(Serializer<E> serializer) {
-            this.serializer = serializer;
-        }
-
-        @Override
-        public void serialize(DataOutput out, E value) throws IOException {
-            //serialize to byte[]
-            DataOutput2 out2 = new DataOutput2();
-            serializer.serialize(out2, value);
-            byte[] b = out2.copyBytes();
-            CompressLZF.SERIALIZER.serialize(out, b);
-        }
-
-        @Override
-        public E deserialize(DataInput in, int available) throws IOException {
-            byte[] b = CompressLZF.SERIALIZER.deserialize(in, available);
-            DataInput2 in2 = new DataInput2(b);
-            return serializer.deserialize(in2, b.length);
-        }
-    }
+    public static <E> Serializer<E> CompressionWrapper(Serializer<E> serializer) {
+        return new Serializer.CompressSerializerWrapper<E>(serializer);
+    }
+
 
 }
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/DB.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/DB.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/DB.java	(revision 29484)
@@ -83,5 +83,5 @@
         }else{
             //create new map
-            ret = new HTreeMap<K,V>(engine,true,Utils.RANDOM.nextInt(), defaultSerializer,null, null);
+            ret = new HTreeMap<K,V>(engine,true,false,Utils.RANDOM.nextInt(), defaultSerializer,null, null);
             nameDir.put(name, ret.rootRecid);
         }
@@ -95,4 +95,5 @@
      *
      * @param name of map to create
+     * @param keepCounter if counter should be kept, without counter updates are faster, but entire collection needs to be traversed to count items.
      * @param keySerializer used to convert keys into/from binary form. Use null for default value.
      * @param valueSerializer used to convert values into/from binary form. Use null for default value.
@@ -103,7 +104,7 @@
      */
     synchronized public <K,V> HTreeMap<K,V> createHashMap(
-            String name, Serializer<K> keySerializer, Serializer<V> valueSerializer){
-        checkNameNotExists(name);
-        HTreeMap<K,V> ret = new HTreeMap<K,V>(engine, true,Utils.RANDOM.nextInt(), defaultSerializer, keySerializer, valueSerializer);
+            String name, boolean keepCounter, Serializer<K> keySerializer, Serializer<V> valueSerializer){
+        checkNameNotExists(name);
+        HTreeMap<K,V> ret = new HTreeMap<K,V>(engine, true,keepCounter,Utils.RANDOM.nextInt(), defaultSerializer, keySerializer, valueSerializer);
         nameDir.put(name, ret.rootRecid);
         collections.put(name, new WeakReference<Object>(ret));
@@ -130,5 +131,5 @@
         }else{
             //create new map
-            HTreeMap<K,Object> m = new HTreeMap<K,Object>(engine, false,Utils.RANDOM.nextInt(), defaultSerializer, null, null);
+            HTreeMap<K,Object> m = new HTreeMap<K,Object>(engine, false,false, Utils.RANDOM.nextInt(), defaultSerializer, null, null);
             ret = m.keySet();
             nameDir.put(name, m.rootRecid);
@@ -142,13 +143,13 @@
      * Creates new HashSet
      * @param name of set to create
+     * @param keepCounter if the counter should be kept; without a counter, updates are faster, but the entire collection must be traversed to count items.
      * @param serializer used to convert keys into/from binary form. Use null for default value.
      * @param <K> item type
      * @throws IllegalArgumentException if name is already used
-
      */
     
-    synchronized public <K> Set<K> createHashSet(String name, Serializer<K> serializer){
-        checkNameNotExists(name);
-        HTreeMap<K,Object> ret = new HTreeMap<K,Object>(engine, false,Utils.RANDOM.nextInt(), defaultSerializer, serializer, null);
+    synchronized public <K> Set<K> createHashSet(String name, boolean keepCounter, Serializer<K> serializer){
+        checkNameNotExists(name);
+        HTreeMap<K,Object> ret = new HTreeMap<K,Object>(engine, false,keepCounter,Utils.RANDOM.nextInt(), defaultSerializer, serializer, null);
         nameDir.put(name, ret.rootRecid);
         Set<K> ret2 = ret.keySet();
@@ -180,5 +181,5 @@
         }else{
             //create new map
-            ret = new BTreeMap<K,V>(engine,BTreeMap.DEFAULT_MAX_NODE_SIZE, true, false, defaultSerializer, null, null, null);
+            ret = new BTreeMap<K,V>(engine,BTreeMap.DEFAULT_MAX_NODE_SIZE, true, false,false, defaultSerializer, null, null, null);
             nameDir.put(name, ret.treeRecid);
         }
@@ -192,4 +193,5 @@
      * @param nodeSize maximal size of node, larger node causes overflow and creation of new BTree node. Use large number for small keys, use small number for large keys.
      * @param valuesStoredOutsideNodes if true, values are stored outside of BTree nodes. Use 'true' if your values are large.
+     * @param keepCounter if the counter should be kept; without a counter, updates are faster, but the entire collection must be traversed to count items.
      * @param keySerializer used to convert keys into/from binary form. Use null for default value.
      * @param valueSerializer used to convert values into/from binary form. Use null for default value.
@@ -201,8 +203,8 @@
      */
     synchronized public <K,V> BTreeMap<K,V> createTreeMap(
-            String name, int nodeSize, boolean valuesStoredOutsideNodes,
+            String name, int nodeSize, boolean valuesStoredOutsideNodes, boolean keepCounter,
             BTreeKeySerializer<K> keySerializer, Serializer<V> valueSerializer, Comparator<K> comparator){
         checkNameNotExists(name);
-        BTreeMap<K,V> ret = new BTreeMap<K,V>(engine, nodeSize, true,valuesStoredOutsideNodes, defaultSerializer, keySerializer, valueSerializer, comparator);
+        BTreeMap<K,V> ret = new BTreeMap<K,V>(engine, nodeSize, true,valuesStoredOutsideNodes, keepCounter,defaultSerializer, keySerializer, valueSerializer, comparator);
         nameDir.put(name, ret.treeRecid);
         collections.put(name, new WeakReference<Object>(ret));
@@ -240,5 +242,5 @@
             //create new map
             BTreeMap<K,Object> m =  new BTreeMap<K,Object>(engine,BTreeMap.DEFAULT_MAX_NODE_SIZE,
-                    false, false, defaultSerializer, null, null, null);
+                    false, false,false, defaultSerializer, null, null, null);
             nameDir.put(name, m.treeRecid);
             ret = m.keySet();
@@ -253,4 +255,5 @@
      * @param name of set to create
      * @param nodeSize maximal size of node, larger node causes overflow and creation of new BTree node. Use large number for small keys, use small number for large keys.
+     * @param keepCounter if the counter should be kept; without a counter, updates are faster, but the entire collection must be traversed to count items.
      * @param serializer used to convert keys into/from binary form. Use null for default value.
      * @param comparator used to sort keys. Use null for default value. TODO delta packing
@@ -259,7 +262,7 @@
      * @return
      */
-    synchronized public <K> NavigableSet<K> createTreeSet(String name, int nodeSize, BTreeKeySerializer<K> serializer, Comparator<K> comparator){
-        checkNameNotExists(name);
-        BTreeMap<K,Object> ret = new BTreeMap<K,Object>(engine, nodeSize, false, false, defaultSerializer, serializer, null, comparator);
+    synchronized public <K> NavigableSet<K> createTreeSet(String name,int nodeSize, boolean keepCounter, BTreeKeySerializer<K> serializer, Comparator<K> comparator){
+        checkNameNotExists(name);
+        BTreeMap<K,Object> ret = new BTreeMap<K,Object>(engine, nodeSize, false, false, keepCounter, defaultSerializer, serializer, null, comparator);
         nameDir.put(name, ret.treeRecid);
         NavigableSet<K> ret2 = ret.keySet();
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/DBMaker.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/DBMaker.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/DBMaker.java	(revision 29484)
@@ -60,5 +60,5 @@
     protected byte[] _xteaEncryptionKey = null;
 
-    protected boolean _freeSpaceReclaimDisabled = false;
+    protected int _freeSpaceReclaimQ = 5;
 
     protected boolean _checksumEnabled = false;
@@ -124,5 +124,5 @@
                 .deleteFilesAfterClose()
                 .closeOnJvmShutdown()
-                .journalDisable()
+                .writeAheadLogDisable()
                 .make()
                 .getTreeMap("temp");
@@ -139,5 +139,5 @@
                 .deleteFilesAfterClose()
                 .closeOnJvmShutdown()
-                .journalDisable()
+                .writeAheadLogDisable()
                 .make()
                 .getHashMap("temp");
@@ -154,5 +154,5 @@
                 .deleteFilesAfterClose()
                 .closeOnJvmShutdown()
-                .journalDisable()
+                .writeAheadLogDisable()
                 .make()
                 .getTreeSet("temp");
@@ -169,5 +169,5 @@
                 .deleteFilesAfterClose()
                 .closeOnJvmShutdown()
-                .journalDisable()
+                .writeAheadLogDisable()
                 .make()
                 .getHashSet("temp");
@@ -210,5 +210,5 @@
      * @return this builder
      */
-    public DBMaker journalDisable(){
+    public DBMaker writeAheadLogDisable(){
         this._journalEnabled = false;
         return this;
@@ -485,23 +485,16 @@
 
     /**
-     * In this mode existing free space is not reused,
-     * but records are added to the end of the store.
-     * <p/>
-     * This slightly improves write performance as store does not have
-     * to traverse list of free records to find and reuse existing position.
-     * <p/>
-     * It also decreases chance for store corruption, as existing data
-     * are not overwritten with new record.
-     * <p/>
-     * When this mode is used for longer time, store becomes fragmented.
-     * It is necessary to run defragmentation then.
-     * <p/>
-     * NOTE: this mode is not append-only, just small setting for update-in-place storage.
-     *
-     *
-     * @return this builder
-     */
-    public DBMaker freeSpaceReclaimDisable(){
-        this._freeSpaceReclaimDisabled = true;
+     * Set free space reclaim Q. It is a value from 0 to 10, indicating how eagerly MapDB
+     * searches for free space inside the store to reuse, before expanding the store file.
+     * 0 means that no free space will be reused and the store file will just grow (effectively append-only).
+     * 10 means that MapDB tries really hard to reuse free space, even if it may hurt performance.
+     * Default value is 5.
+     *
+     *
+     * @return this builder
+     */
+    public DBMaker freeSpaceReclaimQ(int q){
+        if(q<0||q>10) throw new IllegalArgumentException("wrong Q");
+        this._freeSpaceReclaimQ = q;
         return this;
     }
@@ -536,8 +529,8 @@
      * @return this builder
      */
-    public DBMaker powerSavingModeEnable(){
-        this._powerSavingMode = true;
-        return this;
-    }
+//    public DBMaker powerSavingModeEnable(){
+//        this._powerSavingMode = true;
+//        return this;
+//    }
 
 
@@ -559,5 +552,5 @@
             throw new UnsupportedOperationException("Can not open in-memory DB in read-only mode.");
 
-        if(_readOnly && !_file.exists()){
+        if(_readOnly && !_file.exists() && !_appendStorage){
             throw new UnsupportedOperationException("Can not open non-existing file in read-only mode.");
         }
@@ -571,10 +564,27 @@
 
             engine = _journalEnabled ?
-                new StorageJournaled(folFac, _freeSpaceReclaimDisabled, _deleteFilesAfterClose, _failOnWrongHeader, _readOnly):
-                new StorageDirect(folFac, _freeSpaceReclaimDisabled, _deleteFilesAfterClose , _failOnWrongHeader, _readOnly);
+                    //TODO add extra params
+                //new StoreWAL(folFac, _freeSpaceReclaimDisabled, _deleteFilesAfterClose, _failOnWrongHeader, _readOnly):
+                //new StoreDirect(folFac, _freeSpaceReclaimDisabled, _deleteFilesAfterClose , _failOnWrongHeader, _readOnly);
+                new StoreWAL(folFac,  _readOnly,_deleteFilesAfterClose):
+                new StoreDirect(folFac,  _readOnly,_deleteFilesAfterClose);
         }else{
             if(_file==null) throw new UnsupportedOperationException("Append Storage format is not supported with in-memory dbs");
-            engine = new StorageAppend(_file, _RAF, _readOnly, !_journalEnabled);
-        }
+            engine = new StoreAppend(_file, _RAF, _readOnly, !_journalEnabled);
+        }
+
+        if(_checksumEnabled){
+            engine = new ByteTransformEngine(engine, Serializer.CRC32_CHECKSUM);
+        }
+
+        if(_xteaEncryptionKey!=null){
+            engine = new ByteTransformEngine(engine, new EncryptionXTEA(_xteaEncryptionKey));
+        }
+
+
+        if(_compressionEnabled){
+            engine = new ByteTransformEngine(engine, CompressLZF.SERIALIZER);
+        }
+
 
         AsyncWriteEngine engineAsync = null;
@@ -584,16 +594,4 @@
         }
 
-        if(_checksumEnabled){
-            engine = new ByteTransformEngine(engine, Serializer.CRC32_CHECKSUM);
-        }
-
-        if(_xteaEncryptionKey!=null){
-            engine = new ByteTransformEngine(engine, new EncryptionXTEA(_xteaEncryptionKey));
-        }
-
-
-        if(_compressionEnabled){
-            engine = new ByteTransformEngine(engine, CompressLZF.SERIALIZER);
-        }
 
         engine = new SnapshotEngine(engine);
@@ -625,4 +623,7 @@
                 @Override
 				public void run() {
+                    
+                    // for JOSM plugin ImageryCache
+                    org.openstreetmap.josm.plugins.imagerycache.TileDAOMapDB.dbNotAvailable = true;
                     if(!engine2.isClosed())
                         engine2.close();
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/EngineWrapper.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/EngineWrapper.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/EngineWrapper.java	(revision 29484)
@@ -378,4 +378,68 @@
         }
     }
+    
+    
+    /** Engine wrapper with all methods synchronized on global lock, useful to diagnose concurrency issues.*/ 
+    public static class SynchronizedEngineWrapper extends EngineWrapper{
+
+        protected SynchronizedEngineWrapper(Engine engine) {
+            super(engine);
+        }
+
+        @Override
+        synchronized public <A> long put(A value, Serializer<A> serializer) {
+            return super.put(value, serializer);
+        }
+
+        @Override
+        synchronized public <A> A get(long recid, Serializer<A> serializer) {
+            return super.get(recid, serializer);
+        }
+
+        @Override
+        synchronized public <A> void update(long recid, A value, Serializer<A> serializer) {
+            super.update(recid, value, serializer);
+        }
+
+        @Override
+        synchronized public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer) {
+            return super.compareAndSwap(recid, expectedOldValue, newValue, serializer);
+        }
+
+        @Override
+        synchronized public <A> void delete(long recid, Serializer<A> serializer) {
+            super.delete(recid, serializer);
+        }
+
+        @Override
+        synchronized public void close() {
+            super.close();
+        }
+
+        @Override
+        synchronized public boolean isClosed() {
+            return super.isClosed();
+        }
+
+        @Override
+        synchronized public void commit() {
+            super.commit();
+        }
+
+        @Override
+        synchronized public void rollback() {
+            super.rollback();
+        }
+
+        @Override
+        synchronized public boolean isReadOnly() {
+            return super.isReadOnly();
+        }
+
+        @Override
+        synchronized public void compact() {
+            super.compact();
+        }
+    }
 
 }
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/HTreeMap.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/HTreeMap.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/HTreeMap.java	(revision 29484)
@@ -52,4 +52,5 @@
     protected final int hashSalt;
 
+    protected final Atomic.Long counter;
 
     protected final Serializer<K> keySerializer;
@@ -86,4 +87,5 @@
             out.writeBoolean(value.hasValues);
             out.writeInt(value.hashSalt);
+            out.writeLong(value.counterRecid);
             for(int i=0;i<16;i++){
                 Utils.packLong(out, value.segmentRecids[i]);
@@ -100,4 +102,5 @@
             r.hasValues = in.readBoolean();
             r.hashSalt = in.readInt();
+            r.counterRecid = in.readLong();
             r.segmentRecids = new long[16];
             for(int i=0;i<16;i++){
@@ -116,6 +119,8 @@
         boolean hasValues;
         int hashSalt;
+        long counterRecid;
         Serializer keySerializer;
         Serializer valueSerializer;
+
     }
 
@@ -212,10 +217,13 @@
      * @param valueSerializer Serializer used for values. May be null for default value
      */
-    public HTreeMap(Engine engine, boolean hasValues, int hashSalt, Serializer defaultSerializer, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
+    public HTreeMap(Engine engine, boolean hasValues, boolean keepCounter, int hashSalt, Serializer defaultSerializer, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
         this.engine = engine;
         this.hasValues = hasValues;
         this.hashSalt = hashSalt;
+
+
         SerializerBase.assertSerializable(keySerializer);
         SerializerBase.assertSerializable(valueSerializer);
+
 
         if(defaultSerializer == null) defaultSerializer = Serializer.BASIC_SERIALIZER;
@@ -229,11 +237,23 @@
         for(int i=0;i<16;i++)
             segmentRecids[i] = engine.put(new long[16][], DIR_SERIALIZER);
+
+        long counterRecid = 0;
+        if(keepCounter){
+            counterRecid = engine.put(0L, Serializer.LONG_SERIALIZER);
+            this.counter = new Atomic.Long(engine,counterRecid);
+            Bind.size(this,counter);
+        }else{
+            this.counter = null;
+        }
+
         HashRoot r = new HashRoot();
         r.hasValues = hasValues;
         r.hashSalt = hashSalt;
+        r.counterRecid = counterRecid;
         r.segmentRecids = segmentRecids;
         r.keySerializer = this.keySerializer;
         r.valueSerializer = this.valueSerializer;
         this.rootRecid = engine.put(r, new HashRootSerializer(defaultSerializer));
+
     }
 
@@ -259,4 +279,11 @@
         this.keySerializer = r.keySerializer;
         this.valueSerializer = r.valueSerializer;
+
+        if(r.counterRecid!=0){
+            counter = new Atomic.Long(engine,r.counterRecid);
+            Bind.size(this,counter);
+        }else{
+            this.counter = null;
+        }
     }
 
@@ -294,4 +321,8 @@
     @Override
     public int size() {
+        if(counter!=null)
+            return (int) counter.get(); //TODO larger then MAX_INT
+
+
         long counter = 0;
 
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/Locks.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/Locks.java	(revision 29483)
+++ 	(revision )
@@ -1,141 +1,0 @@
-/*
- *  Copyright (c) 2012 Jan Kotek
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.mapdb;
-
-import java.util.concurrent.locks.LockSupport;
-import java.util.concurrent.locks.ReentrantLock;
-
-/**
- * Contains various concurrent locking utilities
- */
-public final class Locks {
-
-    private Locks(){}
-
-
-    /**
-     * An array of ReentrantLocks with infinitive size.
-     * Is used for per-record locking.
-     */
-    public interface RecidLocks{
-        /**
-         * Unlock given recid. Throws an unspecified exception of recid is not locked
-         * @param recid number
-         */
-        public void unlock(final long recid);
-
-        /**
-         * Throws an exception if current thread holds any locks.
-         * Used for assertion that all recids were properly released
-         */
-        public void assertNoLocks();
-        /**
-         * Locks record with given recid. Blocks if already locked, until lock becomes available.
-         * @param recid number
-         */
-        public void lock(final long recid);
-    }
-
-    /**
-     * Holds all existing locks in HashMap.
-     * Lock/unlock operation looks up lock existence in map and act accordingly.
-     * Usefull if there is only handful of locks
-     */
-    public static class LongHashMapRecidLocks implements RecidLocks{
-
-        protected final LongConcurrentHashMap<Thread> locks = new LongConcurrentHashMap<Thread>();
-
-        public void unlock(final long recid) {
-            if(CC.LOG_LOCKS)
-                Utils.LOG.finest("UNLOCK R:"+recid+" T:"+Thread.currentThread().getId());
-
-            final Thread t = locks.remove(recid);
-            if(t!=Thread.currentThread())
-                throw new InternalError("unlocked wrong thread");
-
-        }
-
-        public void assertNoLocks(){
-            if(CC.PARANOID){
-                LongMap.LongMapIterator<Thread> i = locks.longMapIterator();
-                while(i.moveToNext()){
-                    if(i.value()==Thread.currentThread()){
-                        throw new InternalError("Node "+i.key()+" is still locked");
-                    }
-                }
-            }
-        }
-
-        public void lock(final long recid) {
-            if(CC.LOG_LOCKS)
-                Utils.LOG.finest("TRYLOCK R:"+recid+" T:"+Thread.currentThread().getId());
-
-            //feel free to rewrite, if you know better (more efficient) way
-            if(locks.get(recid)==Thread.currentThread()){
-                //check node is not already locked by this thread
-                throw new InternalError("node already locked by current thread: "+recid);
-            }
-
-
-            while(locks.putIfAbsent(recid, Thread.currentThread()) != null){
-                LockSupport.parkNanos(10);
-            }
-            if(CC.LOG_LOCKS)
-                Utils.LOG.finest("LOCK R:"+recid+" T:"+Thread.currentThread().getId());
-        }
-    }
-
-    /**
-     * Fixed size array of locks. <code>Recid % locks.length</code> (modulo)
-     * is used to determine which lock should be used.
-     */
-    public static class SegmentedRecidLocks implements RecidLocks{
-
-        protected final ReentrantLock[] locks;
-
-        protected final int numSegments;
-
-        /**
-         * @param numSegments number of locks, larger number means better concurrency but larger memory overhead. Good value is 16
-         */
-        public SegmentedRecidLocks(int numSegments) {
-            this.numSegments = numSegments;
-            locks = new ReentrantLock[numSegments];
-            for(int i=0;i<numSegments;i++)
-                locks[i] = new ReentrantLock();
-        }
-
-        @Override
-        public void unlock(long recid) {
-            locks[((int) (recid % numSegments))].unlock();
-        }
-
-        @Override
-        public void assertNoLocks() {
-            for(ReentrantLock l:locks){
-                if(l.isLocked())
-                    throw new InternalError("Some node is still locked by current thread");
-            }
-        }
-
-        @Override
-        public void lock(long recid) {
-            locks[((int) (recid % numSegments))].lock();
-        }
-    }
-
-}
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/Queues.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/Queues.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/Queues.java	(revision 29484)
@@ -1,5 +1,3 @@
 package org.mapdb;
-
-
 
 import java.io.DataInput;
@@ -13,5 +11,5 @@
 
 /**
- * Various queues algorithms
+ * Various queue algorithms
  */
 public final class Queues {
@@ -107,5 +105,5 @@
         public void clear() {
             while(!isEmpty())
-                remove();
+                poll();
         }
 
@@ -124,5 +122,4 @@
             if(ret == null) throw new NoSuchElementException();
             return ret;
-
         }
 
@@ -132,5 +129,4 @@
             return add(e);
         }
-
 
 
@@ -202,6 +198,5 @@
 
         protected final boolean useLocks;
-        protected final Locks.RecidLocks locks;
-
+        protected final ReentrantLock[] locks;
 
 
@@ -209,5 +204,5 @@
             super(engine, serializer, headerRecid);
             this.useLocks = useLocks;
-            locks = useLocks? new Locks.LongHashMapRecidLocks() : null;
+            locks = useLocks? Utils.newLocks(32) : null;
         }
 
@@ -229,14 +224,14 @@
             Node<E> n;
             do{
-                if(useLocks && head2!=0)locks.unlock(head2);
+                if(useLocks && head2!=0)Utils.lock(locks,head2);
                 head2 =head.get();
                 if(head2 == 0) return null;
 
-                if(useLocks && head2!=0)locks.lock(head2);
+                if(useLocks && head2!=0)Utils.lock(locks,head2);
                 n = engine.get(head2, nodeSerializer);
             }while(n==null || !head.compareAndSet(head2, n.next));
             if(useLocks && head2!=0){
                 engine.delete(head2,Serializer.LONG_SERIALIZER);
-                locks.unlock(head2);
+                Utils.unlock(locks,head2);
             }else{
                 engine.update(head2, null, nodeSerializer);
@@ -328,10 +323,5 @@
         }
 
-
-        @Override
-        public boolean isEmpty() {
-            return head.get() == 0;
-        }
-
+        @Override
         public boolean add(E item){
             final long nextTail = engine.put((Node<E>)Node.EMPTY, nodeSerializer);
@@ -347,4 +337,5 @@
         }
 
+        @Override
         public E poll(){
             while(true){
@@ -472,4 +463,17 @@
 
         @Override
+        public void clear() {
+            // hold the lock for the whole drain so the size snapshot stays consistent
+            lock.lock();
+            try {
+                for (int i = 0; i < size; i++) {
+                    poll();
+                }
+            } finally {
+                lock.unlock();
+            }
+        }
+
+        @Override
         public E poll() {
             lock.lock();
@@ -569,4 +573,3 @@
     }
 
-
 }
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/Serializer.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/Serializer.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/Serializer.java	(revision 29484)
@@ -20,4 +20,5 @@
 import java.io.DataOutput;
 import java.io.IOException;
+import java.io.Serializable;
 import java.util.zip.CRC32;
 
@@ -148,5 +149,5 @@
      */
     
-    public static final Serializer<byte[]> CRC32_CHECKSUM = new Serializer<byte[]>() {
+    Serializer<byte[]> CRC32_CHECKSUM = new Serializer<byte[]>() {
         @Override
         public void serialize(DataOutput out, byte[] value) throws IOException {
@@ -174,4 +175,42 @@
 
 
+    Serializer<byte[] > BYTE_ARRAY_SERIALIZER = new Serializer<byte[]>() {
+
+        @Override
+        public void serialize(DataOutput out, byte[] value) throws IOException {
+            out.write(value);
+        }
+
+        @Override
+        public byte[] deserialize(DataInput in, int available) throws IOException {
+            byte[] ret = new byte[available];
+            in.readFully(ret);
+            return ret;
+        }
+    } ;
+
+
+    class CompressSerializerWrapper<E> implements Serializer<E>, Serializable {
+        protected final Serializer<E> serializer;
+        public CompressSerializerWrapper(Serializer<E> serializer) {
+            this.serializer = serializer;
+        }
+
+        @Override
+        public void serialize(DataOutput out, E value) throws IOException {
+            //serialize to byte[]
+            DataOutput2 out2 = new DataOutput2();
+            serializer.serialize(out2, value);
+            byte[] b = out2.copyBytes();
+            CompressLZF.SERIALIZER.serialize(out, b);
+        }
+
+        @Override
+        public E deserialize(DataInput in, int available) throws IOException {
+            byte[] b = CompressLZF.SERIALIZER.deserialize(in, available);
+            DataInput2 in2 = new DataInput2(b);
+            return serializer.deserialize(in2, b.length);
+        }
+    }
 }
 
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/SerializerBase.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/SerializerBase.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/SerializerBase.java	(revision 29484)
@@ -34,9 +34,9 @@
 
 
-    static final Set knownSerializable = new HashSet(Arrays.asList(
+    static final class knownSerializable{
+        static final Set get = new HashSet(Arrays.asList(
             BTreeKeySerializer.STRING,
             BTreeKeySerializer.ZERO_OR_POSITIVE_LONG,
             BTreeKeySerializer.ZERO_OR_POSITIVE_INT,
-
             Utils.COMPARABLE_COMPARATOR, Utils.COMPARABLE_COMPARATOR_WITH_NULLS,
 
@@ -44,8 +44,9 @@
             Serializer.EMPTY_SERIALIZER, Serializer.BASIC_SERIALIZER, Serializer.CRC32_CHECKSUM
     ));
+    }
 
     public static void assertSerializable(Object o){
         if(o!=null && !(o instanceof Serializable)
-                && !knownSerializable.contains(o)){
+                && !knownSerializable.get.contains(o)){
             throw new IllegalArgumentException("Not serializable: "+o.getClass());
         }
@@ -55,5 +56,5 @@
      * Utility class similar to ArrayList, but with fast identity search.
      */
-    final static class FastArrayList<K> {
+    protected final static class FastArrayList<K> {
 
         private int size = 0;
@@ -303,7 +304,7 @@
             if(((BTreeKeySerializer.BasicKeySerializer)obj).defaultSerializer!=this) throw new InternalError();
             return;
-        } else if(clazz == CompressLZF.SerializerCompressWrapper.class){
+        } else if(clazz == CompressSerializerWrapper.class){
             out.write(SERIALIZER_COMPRESSION_WRAPPER);
-            serialize(out, ((CompressLZF.SerializerCompressWrapper)obj).serializer, objectStack);
+            serialize(out, ((CompressSerializerWrapper)obj).serializer, objectStack);
             return;
 
@@ -1111,5 +1112,5 @@
                 break;
             case SERIALIZER_COMPRESSION_WRAPPER:
-                ret = CompressLZF.serializerCompressWrapper((Serializer) deserialize(is, objectStack));
+                ret = CompressLZF.CompressionWrapper((Serializer) deserialize(is, objectStack));
                 break;
             default:
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/SnapshotEngine.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/SnapshotEngine.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/SnapshotEngine.java	(revision 29484)
@@ -3,4 +3,5 @@
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.locks.ReentrantLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -15,5 +16,5 @@
 public class SnapshotEngine extends EngineWrapper{
 
-    protected final Locks.RecidLocks locks = new Locks.LongHashMapRecidLocks();
+    protected final ReentrantLock[] locks =  Utils.newLocks(32);
 
     protected final static Object NOT_EXIST = new Object();
@@ -38,5 +39,5 @@
     public <A> long put(A value, Serializer<A> serializer) {
         long recid = super.put(value, serializer);
-        locks.lock(recid);
+        Utils.lock(locks,recid);
         try{
             for(Snapshot s:snapshots.keySet()){
@@ -45,5 +46,5 @@
             return recid;
         }finally{
-            locks.unlock(recid);
+            Utils.unlock(locks,recid);
         }
     }
@@ -51,5 +52,5 @@
     @Override
     public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer) {
-        locks.lock(recid);
+        Utils.lock(locks,recid);
         try{
             boolean ret =  super.compareAndSwap(recid, expectedOldValue, newValue, serializer);
@@ -61,5 +62,5 @@
             return ret;
         }finally{
-            locks.unlock(recid);
+            Utils.unlock(locks,recid);
         }
     }
@@ -67,5 +68,5 @@
     @Override
     public <A> void update(long recid, A value, Serializer<A> serializer) {
-        locks.lock(recid);
+        Utils.lock(locks,recid);
         try{
             Object val = NOT_INIT_YET;
@@ -80,5 +81,5 @@
             super.update(recid, value, serializer);
         }finally{
-            locks.unlock(recid);
+            Utils.unlock(locks,recid);
         }
     }
@@ -86,5 +87,5 @@
     @Override
     public  <A> void delete(long recid, Serializer<A> serializer) {
-        locks.lock(recid);
+        Utils.lock(locks,recid);
         try{
             Object val = NOT_INIT_YET;
@@ -99,5 +100,5 @@
             super.delete(recid,serializer);
         }finally{
-            locks.unlock(recid);
+            Utils.unlock(locks,recid);
         }
     }
@@ -131,5 +132,5 @@
         @Override
         public <A> A get(long recid, Serializer<A> serializer) {
-            locks.lock(recid);
+            Utils.lock(locks,recid);
             try{
                 Object ret = oldValues.get(recid);
@@ -140,5 +141,5 @@
                 return SnapshotEngine.this.getWrappedEngine().get(recid, serializer);
             }finally{
-                locks.unlock(recid);
+                Utils.unlock(locks,recid);
             }
         }
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/StorageAppend.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/StorageAppend.java	(revision 29483)
+++ 	(revision )
@@ -1,341 +1,0 @@
-package org.mapdb;
-
-import java.io.File;
-import java.io.IOError;
-import java.io.IOException;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-/**
- * Append only storage. Uses different file format than Direct and Journaled storage
- */
-public class StorageAppend implements Engine{
-
-    protected final File file;
-    protected final boolean useRandomAccessFile;
-    protected final boolean readOnly;
-
-    protected final static long FILE_NUMBER_SHIFT = 28;
-    protected final static long FILE_OFFSET_MASK = 0x0FFFFFFFL;
-
-    protected final static long FILE_HEADER = 56465465456465L;
-
-    protected final ReentrantReadWriteLock appendLock = new ReentrantReadWriteLock();
-    protected final static Long THUMBSTONE = Long.MIN_VALUE;
-    protected final static int THUMBSTONE_SIZE = -3;
-    protected final static long EOF = -1;
-    protected final static long COMMIT = -2;
-    protected final static long ROLLBACK = -2;
-
-    protected Volume currentVolume;
-    protected long currentVolumeNum;
-    protected int currentFileOffset;
-    protected long maxRecid = 10;
-
-    protected LongConcurrentHashMap<Volume> volumes = new LongConcurrentHashMap<Volume>();
-    protected final LongConcurrentHashMap<Long> recidsInTx = new LongConcurrentHashMap<Long>();
-
-
-    protected final Volume recidsTable = new Volume.MemoryVol(true);
-    protected static final int MAX_FILE_SIZE = 1024 * 1024 * 10;
-
-    public StorageAppend(File file, boolean useRandomAccessFile, boolean readOnly, boolean transactionsDisabled) {
-        this.file = file;
-        this.useRandomAccessFile = useRandomAccessFile;
-        this.readOnly = readOnly;
-        //TODO special mode with transactions disabled
-
-        File zeroFile = getFileNum(0);
-        if(zeroFile.exists()){
-            replayLog();
-        }else{
-            //create zero file
-            currentVolume = Volume.volumeForFile(zeroFile, useRandomAccessFile, readOnly);
-            currentVolume.ensureAvailable(8);
-            currentVolume.putLong(0, FILE_HEADER);
-            currentFileOffset = 8;
-            volumes.put(0L, currentVolume);
-        }
-
-
-
-
-    }
-
-    protected void replayLog() {
-        try{
-        for(long fileNum=0;;fileNum++){
-            File f = getFileNum(fileNum);
-            if(!f.exists()) return;
-            currentVolume = Volume.volumeForFile(f, useRandomAccessFile, readOnly);
-            volumes.put(fileNum, currentVolume);
-            currentVolumeNum = fileNum;
-
-            //replay file and rebuild recid index table
-            LongHashMap<Long> recidsTable2 = new LongHashMap<Long>();
-            if(!currentVolume.isEmpty()){
-                int pos =0;
-                long header = currentVolume.getLong(pos); pos+=8;
-                if(header!=FILE_HEADER) throw new InternalError();
-
-                for(;;){
-                    long recid = currentVolume.getLong(pos); pos+=8;
-                    maxRecid = Math.max(recid, maxRecid);
-
-                    if(recid == EOF || recid == 0){
-                        break; //end of file
-                    }else if(recid == COMMIT){
-                        //move stuff from temporary table to currently used
-                        commitRecids(recidsTable2);
-                        continue;
-                    }else if(recid == ROLLBACK){
-                        //do not use last recids
-                        recidsTable2.clear();
-                        continue;
-                    }
-
-                    long filePos = (fileNum<<FILE_NUMBER_SHIFT) | pos;
-                    int size = currentVolume.getInt(pos); pos+=4;
-                    if(size!=THUMBSTONE_SIZE){
-                        //skip data
-                        pos+=size;
-                        //store location within the log files in memory
-                        recidsTable2.put(recid, filePos);
-                    }else{
-                        //record was deleted (THUMBSTONE mark)
-                        recidsTable2.put(recid, THUMBSTONE);
-                    }
-                }
-
-            }
-        }
-        }catch(IOError e){
-            //TODO error is part of workflow, but maybe change workflow?
-        }
-    }
-
-    protected File getFileNum(long fileNum) {
-        return new File(file.getPath()+"."+fileNum);
-    }
-
-
-    protected void commitRecids(LongMap<Long> recidsTable2) {
-        LongMap.LongMapIterator<Long> iter = recidsTable2.longMapIterator();
-        while(iter.moveToNext()){
-            long recidsTableOffset = iter.key()*8;
-            recidsTable.ensureAvailable(recidsTableOffset+8);
-            recidsTable.putLong(recidsTableOffset, iter.value());
-        }
-        recidsTable2.clear();
-    }
-
-
-    @Override
-    public <A> long put(A value, Serializer<A> serializer) {
-        try{
-            DataOutput2 out = new DataOutput2();
-            serializer.serialize(out, value);
-            appendLock.writeLock().lock();
-            try{
-
-                long newRecid = maxRecid++; //TODO free recid management
-                update2(newRecid, out);
-                rollOverFile();
-                return newRecid;
-            }finally {
-                appendLock.writeLock().unlock();
-            }
-        }catch(IOException e){
-            throw new IOError(e);
-        }
-    }
-
-    protected void update2(long recid, DataOutput2 out) {
-        currentVolume.ensureAvailable(currentFileOffset+8+4+out.pos);
-        currentVolume.putLong(currentFileOffset,recid);
-        currentFileOffset+=8;
-        long filePos = (currentVolumeNum<<FILE_NUMBER_SHIFT) | currentFileOffset;
-
-        currentVolume.putInt(currentFileOffset,out.pos);
-        currentFileOffset+=4;
-        currentVolume.putData(currentFileOffset,out.buf, out.pos);
-        currentFileOffset+=out.pos;
-        recidsInTx.put(recid, filePos);
-    }
-
-    @Override
-    public <A> A get(long recid, Serializer<A> serializer) {
-        appendLock.readLock().lock();
-        try {
-            Long fileNum2 = recidsInTx.get(recid);
-            if(fileNum2 == null)
-                    fileNum2 = recidsTable.getLong(recid*8);
-
-            if(fileNum2 == THUMBSTONE){  //there is warning about '==', it is ok
-                //record was deleted;
-                return null;
-            }
-
-            if(fileNum2 == 0){
-                return serializer.deserialize(new DataInput2(new byte[0]), 0);
-            }
-
-            long fileNum = fileNum2;
-
-            long fileOffset = fileNum & FILE_OFFSET_MASK;
-            if(fileOffset>MAX_FILE_SIZE) throw new InternalError();
-            fileNum = fileNum>>>FILE_NUMBER_SHIFT;
-            Volume v = volumes.get(fileNum);
-
-            int size = v.getInt(fileOffset);
-            DataInput2 input = v.getDataInput(fileOffset+4, size);
-
-            return serializer.deserialize(input, size);
-        } catch (IOException e) {
-            throw new IOError(e);
-        }finally {
-            appendLock.readLock().unlock();
-        }
-
-    }
-
-    @Override
-    public <A> void update(long recid, A value, Serializer<A> serializer) {
-        try{
-            DataOutput2 out = new DataOutput2();
-            serializer.serialize(out, value);
-            appendLock.writeLock().lock();
-            try {
-                update2(recid, out);
-                rollOverFile();
-            }finally {
-                appendLock.writeLock().unlock();
-            }
-
-        }catch(IOException e){
-            throw new IOError(e);
-        }
-
-    }
-
-    @Override
-    public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer) {
-        appendLock.writeLock().lock();
-        try{
-            Object oldVal = get(recid, serializer);
-            //TODO compare binary stuff?
-            if((oldVal==null && expectedOldValue==null)|| (oldVal!=null && oldVal.equals(expectedOldValue))){
-                DataOutput2 out = new DataOutput2();
-                try {
-                    serializer.serialize(out, newValue); //TODO serialize outside of APPEND_LOCK
-                } catch (IOException e) {
-                    throw new IOError(e);
-                }
-                update2(recid, out);
-                rollOverFile();
-                return true;
-            }else{
-                return false;
-            }
-        }finally {
-            appendLock.writeLock().unlock();
-        }
-
-    }
-
-    @Override
-    public <A> void delete(long recid, Serializer<A> serializer){
-        //put thumbstone into log
-        appendLock.writeLock().lock();
-        try{
-
-            currentVolume.ensureAvailable(currentFileOffset+8+4);
-            currentVolume.putLong(currentFileOffset, recid);
-            currentFileOffset+=8;
-            currentVolume.putInt(currentFileOffset, THUMBSTONE_SIZE);
-            currentFileOffset+=4;
-            recidsInTx.put(recid, THUMBSTONE);
-            rollOverFile();
-        }finally {
-            appendLock.writeLock().unlock();
-        }
-
-    }
-
-    @Override
-    public void close() {
-        currentVolume = null;
-        volumes = null;
-    }
-
-    @Override
-    public boolean isClosed() {
-        return volumes==null;
-    }
-
-    @Override
-    public void commit() {
-        //append commit mark
-        appendLock.writeLock().lock();
-        try{
-            commitRecids(recidsInTx);
-            currentVolume.ensureAvailable(currentFileOffset+8);
-            currentVolume.putLong(currentFileOffset, COMMIT);
-            currentFileOffset+=8;
-            currentVolume.sync();
-            rollOverFile();
-        }finally {
-            appendLock.writeLock().unlock();
-        }
-    }
-
-    @Override
-    public void rollback() throws UnsupportedOperationException {
-        //append rollback mark
-        appendLock.writeLock().lock();
-        try{
-            currentVolume.ensureAvailable(currentFileOffset+8);
-            currentVolume.putLong(currentFileOffset, ROLLBACK);
-            currentFileOffset+=8;
-            currentVolume.sync();
-            recidsInTx.clear();
-            rollOverFile();
-        }finally {
-            appendLock.writeLock().unlock();
-        }
-
-
-    }
-
-
-    @Override
-    public boolean isReadOnly() {
-        return readOnly;
-    }
-
-    @Override
-    public void compact() {
-        //TODO implement compaction on StorageAppend
-    }
-
-    /** check if current file is too big, if yes finish it and start next file */
-    protected void rollOverFile(){
-        if(currentFileOffset<MAX_FILE_SIZE-8) return;
-
-
-        currentVolume.ensureAvailable(currentFileOffset+8);
-        currentVolume.putLong(currentFileOffset, EOF);
-        currentVolume.sync();
-        currentVolumeNum++;
-        currentVolume = Volume.volumeForFile(
-              getFileNum(currentVolumeNum), useRandomAccessFile, readOnly);
-        currentVolume.ensureAvailable(MAX_FILE_SIZE);
-        currentVolume.putLong(0, FILE_HEADER);
-        currentFileOffset = 8;
-        currentVolume.sync();
-        volumes.put(currentVolumeNum,currentVolume);
-
-    }
-
-}
-
-
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/StorageDirect.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/StorageDirect.java	(revision 29483)
+++ 	(revision )
@@ -1,722 +1,0 @@
-/*
- *  Copyright (c) 2012 Jan Kotek
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.mapdb;
-
-import java.io.File;
-import java.io.IOError;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-/**
- * Storage Engine which saves record directly into file.
- * Is used when transaction journal is disabled.
- *
- * @author Jan Kotek
- */
-public class StorageDirect  implements Engine {
-
-
-
-
-    static final long PHYS_OFFSET_MASK = 0x0000FFFFFFFFFFFFL;
-
-
-    /** File header. First 4 bytes are 'JDBM', last two bytes are store format version */
-    static final long HEADER = 5646556656456456L;
-
-
-    static final int RECID_CURRENT_PHYS_FILE_SIZE = 1;
-    static final int RECID_CURRENT_INDEX_FILE_SIZE = 2;
-
-
-    /** offset in index file which points to FREEINDEX list (free slots in index file) */
-    static final int RECID_FREE_INDEX_SLOTS = 3;
-
-
-    //TODO slots 5 to 18 are currently unused
-
-
-
-    static final int RECID_FREE_PHYS_RECORDS_START = 20;
-
-    static final int NUMBER_OF_PHYS_FREE_SLOT =1000 + 1535;
-    static final int MAX_RECORD_SIZE = 65535;
-
-    /** must be smaller then 127 */
-    static final byte LONG_STACK_NUM_OF_RECORDS_PER_PAGE = 100;
-
-    static final int LONG_STACK_PAGE_SIZE =   8 + LONG_STACK_NUM_OF_RECORDS_PER_PAGE * 8;
-
-    /** offset in index file from which normal physid starts */
-    static final int INDEX_OFFSET_START = RECID_FREE_PHYS_RECORDS_START +NUMBER_OF_PHYS_FREE_SLOT;
-    public static final String DATA_FILE_EXT = ".p";
-
-
-    protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
-
-    protected final boolean appendOnly;
-    protected final boolean deleteFilesOnExit;
-    protected final boolean failOnWrongHeader;
-    protected final boolean readOnly;
-
-    volatile protected Volume phys;
-    volatile protected Volume index;
-
-    public StorageDirect(Volume.Factory volFac, boolean appendOnly,
-                   boolean deleteFilesOnExit, boolean failOnWrongHeader, boolean readOnly) {
-
-        this.appendOnly = appendOnly;
-        this.deleteFilesOnExit = deleteFilesOnExit;
-        this.failOnWrongHeader = failOnWrongHeader;
-        this.readOnly = readOnly;
-
-        try{
-            lock.writeLock().lock();
-
-
-            phys = volFac.createPhysVolume();
-            index = volFac.createIndexVolume();
-            phys.ensureAvailable(8);
-            index.ensureAvailable(INDEX_OFFSET_START*8);
-
-            final long header = index.isEmpty()? 0 : index.getLong(0);
-            if(header!=HEADER){
-                if(failOnWrongHeader) throw new IOError(new IOException("Wrong file header"));
-                else writeInitValues();
-            }
-
-            File indexFile = index.getFile();
-            if(!(this instanceof StorageJournaled) && indexFile !=null
-                    && new File(indexFile.getPath()+ StorageJournaled.TRANS_LOG_FILE_EXT).exists()){
-                throw new IllegalAccessError("Could not open DB in Direct Mode; WriteAhead log file exists, it may contain some data.");
-            }
-
-        }finally {
-            lock.writeLock().unlock();
-        }
-
-    }
-
-    public StorageDirect(Volume.Factory volFac){
-        this(volFac, false, false,false, false);
-    }
-
-
-
-
-    @Override
-    public <A> long put(A value, Serializer<A> serializer) {
-        if(value == null||serializer==null) throw new NullPointerException();
-        try{
-            DataOutput2 out = new DataOutput2();
-            serializer.serialize(out,value);
-            //TODO log warning if record is too big
-
-
-            try{
-                lock.writeLock().lock();
-                //update index file, find free recid
-                long recid = longStackTake(RECID_FREE_INDEX_SLOTS);
-                if(recid == 0){
-                    //could not reuse recid, so create new one
-                    final long indexSize = index.getLong(RECID_CURRENT_INDEX_FILE_SIZE * 8);
-                    if(indexSize%8!=0) throw new InternalError();
-                    recid = indexSize/8;
-                    //grow buffer if necessary
-                    index.ensureAvailable(indexSize+8);
-                    index.putLong(RECID_CURRENT_INDEX_FILE_SIZE * 8, indexSize + 8);
-                }
-
-                if(out.pos<MAX_RECORD_SIZE){
-                    //is small size and can be stored in single record
-                    //get physical record, first 16 bites is record size, remaining 48 bytes is record offset in phys file
-                    final long indexValue = out.pos!=0?
-                            freePhysRecTake(out.pos):
-                            0L;
-
-                    phys.putData(indexValue&PHYS_OFFSET_MASK, out.buf, out.pos);
-                    index.putLong(recid * 8, indexValue);
-                }else{
-                    putLargeLinkedRecord(out, recid);
-                }
-
-                return recid - INDEX_OFFSET_START;
-            }finally {
-                lock.writeLock().unlock();
-            }
-        }catch(IOException e){
-            throw new IOError(e);
-        }
-    }
-
-    private void putLargeLinkedRecord(DataOutput2 out, long recid) throws IOException {
-        //large size, needs to link multiple records together
-        //start splitting from end, so we can build up linked list
-        final int chunkSize = MAX_RECORD_SIZE-8;
-        int lastArrayPos = out.pos;
-        int arrayPos = out.pos - out.pos%chunkSize;
-        long lastChunkPhysId = 0;
-        while(arrayPos>=0){
-            final int currentChunkSize = lastArrayPos-arrayPos;
-            byte[] b = new byte[currentChunkSize+8]; //TODO reuse byte[]
-            //append reference to prev physId
-            ByteBuffer.wrap(b).putLong(0, lastChunkPhysId);
-            //copy chunk
-            System.arraycopy(out.buf, arrayPos, b, 8, currentChunkSize);
-            //and write current chunk
-            lastChunkPhysId = freePhysRecTake(currentChunkSize+8);
-            phys.putData(lastChunkPhysId&PHYS_OFFSET_MASK, b, b.length);
-            lastArrayPos = arrayPos;
-            arrayPos-=chunkSize;
-        }
-        index.putLong(recid * 8, lastChunkPhysId);
-    }
-
-
-    @Override
-    public <A> A get(long recid, Serializer<A> serializer) {
-        if(serializer==null) throw new NullPointerException();
-        if(recid<=0) throw new IllegalArgumentException("recid");
-        recid += INDEX_OFFSET_START;
-        try{
-            try{
-                lock.readLock().lock();
-                final long indexValue = index.getLong(recid * 8) ;
-                return recordGet2(indexValue, phys, serializer);
-            }finally{
-                lock.readLock().unlock();
-            }
-
-
-        }catch(IOException e){
-            throw new IOError(e);
-        }
-    }
-
-
-
-    @Override
-    public <A> void update(long recid, A value, Serializer<A> serializer){
-        if(value == null||serializer==null) throw new NullPointerException();
-        if(recid<=0) throw new IllegalArgumentException("recid");
-        recid+=INDEX_OFFSET_START;
-        try{
-            DataOutput2 out = new DataOutput2();
-            serializer.serialize(out,value);
-
-            try{
-                lock.writeLock().lock();
-
-                final long oldIndexVal = index.getLong(recid * 8);
-                final long oldSize = oldIndexVal>>>48;
-
-                //check if we need to split new records into multiple one
-                if(out.pos<MAX_RECORD_SIZE){
-                    //check if size has changed
-                    if(oldSize == 0 && out.pos==0){
-                        //do nothing
-                    }else if(oldSize == out.pos && oldSize!=MAX_RECORD_SIZE){
-                        //size is the same, so just write new data
-                        phys.putData(oldIndexVal&PHYS_OFFSET_MASK, out.buf, out.pos);
-                    }else if(oldSize != 0 && out.pos==0){
-                        //new record has zero size, just delete old phys one
-                        freePhysRecPut(oldIndexVal);
-                        index.putLong(recid * 8, 0L);
-                    }else{
-                        //size has changed, so write into new location
-                        final long newIndexValue = freePhysRecTake(out.pos);
-                        phys.putData(newIndexValue&PHYS_OFFSET_MASK, out.buf, out.pos);
-                        //update index file with new location
-                        index.putLong(recid * 8, newIndexValue);
-
-                        //and set old phys record as free
-                        unlinkPhysRecord(oldIndexVal,recid);
-                    }
-                }else{
-                    putLargeLinkedRecord(out, recid);
-                    //and set old phys record as free
-                    unlinkPhysRecord(oldIndexVal,recid);
-                }
-            }finally {
-                lock.writeLock().unlock();
-            }
-        }catch(IOException e){
-            throw new IOError(e);
-        }
-    }
-
-
-   @Override
-   public <A> void delete(long recid, Serializer<A> serializer){
-        if(serializer==null)throw new NullPointerException();
-        if(recid<=0) throw new IllegalArgumentException("recid");
-        recid+=INDEX_OFFSET_START;
-        try{
-            lock.writeLock().lock();
-            final long oldIndexVal = index.getLong(recid * 8);
-            index.putLong(recid * 8, 0L);
-            longStackPut(RECID_FREE_INDEX_SLOTS,recid);
-            unlinkPhysRecord(oldIndexVal,recid);
-        }catch(IOException e){
-            throw new IOError(e);
-        }finally {
-            lock.writeLock().unlock();
-        }
-    }
-
-    @Override
-    public void commit() {
-        //TODO sync here?
-    }
-
-    @Override
-    public void rollback() {
-        throw new UnsupportedOperationException("Can not rollback, transactions disabled.");
-    }
-
-
-
-   protected long longStackTake(final long listRecid) throws IOException {
-        final long dataOffset = index.getLong(listRecid * 8) &PHYS_OFFSET_MASK;
-        if(dataOffset == 0)
-            return 0; //there is no such list, so just return 0
-
-        writeLock_checkLocked();
-
-
-        final int numberOfRecordsInPage = phys.getUnsignedByte(dataOffset);
-
-        if(numberOfRecordsInPage<=0) throw new InternalError();
-        if(numberOfRecordsInPage>LONG_STACK_NUM_OF_RECORDS_PER_PAGE) throw new InternalError();
-
-        final long ret = phys.getLong (dataOffset+numberOfRecordsInPage*8);
-
-        //was it only record at that page?
-        if(numberOfRecordsInPage == 1){
-            //yes, delete this page
-            final long previousListPhysid =phys.getLong(dataOffset) &PHYS_OFFSET_MASK;
-            if(previousListPhysid !=0){
-                //update index so it points to previous page
-                index.putLong(listRecid * 8, previousListPhysid | (((long) LONG_STACK_PAGE_SIZE) << 48));
-            }else{
-                //zero out index
-                index.putLong(listRecid * 8, 0L);
-            }
-            //put space used by this page into free list
-            freePhysRecPut(dataOffset | (((long)LONG_STACK_PAGE_SIZE)<<48));
-        }else{
-            //no, it was not last record at this page, so just decrement the counter
-            phys.putUnsignedByte(dataOffset, (byte) (numberOfRecordsInPage - 1));
-        }
-        return ret;
-
-    }
-
-
-   protected void longStackPut(final long listRecid, final long offset) throws IOException {
-       writeLock_checkLocked();
-
-       //index position was cleared, put into free index list
-        final long listPhysid2 = index.getLong(listRecid * 8) &PHYS_OFFSET_MASK;
-
-        if(listPhysid2 == 0){ //empty list?
-            //yes empty, create new page and fill it with values
-            final long listPhysid = freePhysRecTake(LONG_STACK_PAGE_SIZE) &PHYS_OFFSET_MASK;
-            if(listPhysid == 0) throw new InternalError();
-            //set previous Free Index List page to zero as this is first page
-            phys.putLong(listPhysid, 0L);
-            //set number of free records in this page to 1
-            phys.putUnsignedByte(listPhysid, (byte) 1);
-
-            //set  record
-            phys.putLong(listPhysid + 8, offset);
-            //and update index file with new page location
-            index.putLong(listRecid * 8, (((long) LONG_STACK_PAGE_SIZE) << 48) | listPhysid);
-        }else{
-            final int numberOfRecordsInPage = phys.getUnsignedByte(listPhysid2);
-            if(numberOfRecordsInPage == LONG_STACK_NUM_OF_RECORDS_PER_PAGE){ //is current page full?
-                //yes it is full, so we need to allocate new page and write our number there
-
-                final long listPhysid = freePhysRecTake(LONG_STACK_PAGE_SIZE) &PHYS_OFFSET_MASK;
-                if(listPhysid == 0) throw new InternalError();
-                //final ByteBuffers dataBuf = dataBufs[((int) (listPhysid / BUF_SIZE))];
-                //set location to previous page
-                phys.putLong(listPhysid, listPhysid2);
-                //set number of free records in this page to 1
-                phys.putUnsignedByte(listPhysid, (byte) 1);
-                //set free record
-                phys.putLong(listPhysid +  8, offset);
-                //and update index file with new page location
-                index.putLong(listRecid * 8, (((long) LONG_STACK_PAGE_SIZE) << 48) | listPhysid);
-            }else{
-                //there is space on page, so just write released recid and increase the counter
-                phys.putLong(listPhysid2 +  8 + 8 * numberOfRecordsInPage, offset);
-                phys.putUnsignedByte(listPhysid2, (byte) (numberOfRecordsInPage + 1));
-            }
-        }
-   }
-
-
-
-
-	protected long freePhysRecTake(final int requiredSize) throws IOException {
-        writeLock_checkLocked();
-
-        if(requiredSize<=0) throw new InternalError();
-
-        long freePhysRec = (appendOnly
-                //TODO !HACK! to 'fix' issue 69
-                || Thread.currentThread().getStackTrace().length>256)
-                ? 0L:
-                findFreePhysSlot(requiredSize);
-        if(freePhysRec!=0){
-            return freePhysRec;
-        }
-
-
-
-        //No free records found, so lets increase the file size.
-        //We need to take case of growing ByteBuffers.
-        // Also max size of ByteBuffers is 2GB, so we need to use multiple ones
-
-        final long physFileSize = index.getLong(RECID_CURRENT_PHYS_FILE_SIZE*8);
-        if(physFileSize <=0) throw new InternalError("illegal file size:"+physFileSize);
-
-        //check if new record would be overflowing BUF_SIZE
-        if(physFileSize%Volume.BUF_SIZE+requiredSize<=Volume.BUF_SIZE){
-            //no, so just increase file size
-            phys.ensureAvailable(physFileSize+requiredSize);
-            //so just increase buffer size
-            index.putLong(RECID_CURRENT_PHYS_FILE_SIZE * 8, physFileSize + requiredSize);
-
-            //and return this
-            return (((long)requiredSize)<<48) | physFileSize;
-        }else{
-            //new size is overlapping 2GB ByteBuffers size
-            //so we need to create empty record for 'padding' size to 2GB
-
-            final long  freeSizeToCreate = Volume.BUF_SIZE -  physFileSize%Volume.BUF_SIZE;
-            if(freeSizeToCreate == 0) throw new InternalError();
-
-            final long nextBufferStartOffset = physFileSize + freeSizeToCreate;
-            if(nextBufferStartOffset%Volume.BUF_SIZE!=0) throw new InternalError();
-
-            //increase the disk size
-            phys.ensureAvailable(physFileSize + freeSizeToCreate + requiredSize);
-            index.putLong(RECID_CURRENT_PHYS_FILE_SIZE * 8, physFileSize + freeSizeToCreate + requiredSize);
-
-            //mark 'padding' free record
-            freePhysRecPut((freeSizeToCreate<<48)|physFileSize);
-
-            //and finally return position at beginning of new buffer
-            return (((long)requiredSize)<<48) | nextBufferStartOffset;
-        }
-
-    }
-
-
-
-    private void writeInitValues() {
-        writeLock_checkLocked();
-
-        //zero out all index values
-        for(int i=1;i<=INDEX_OFFSET_START+Engine.LAST_RESERVED_RECID;i++){
-            index.putLong(i*8, 0L);
-        }
-
-        //write headers
-        phys.putLong(0, HEADER);
-        index.putLong(0L,HEADER);
-        if(index.getLong(0L)!=HEADER)
-            throw new InternalError();
-
-
-        //and set current sizes
-        index.putLong(RECID_CURRENT_PHYS_FILE_SIZE * 8, 8L);
-        index.putLong(RECID_CURRENT_INDEX_FILE_SIZE * 8, INDEX_OFFSET_START * 8 + Engine.LAST_RESERVED_RECID*8 + 8);
-    }
-
-
-    protected void writeLock_checkLocked() {
-        if(!lock.writeLock().isHeldByCurrentThread())
-            throw new IllegalAccessError("no write lock");
-    }
-
-
-
-    final int freePhysRecSize2FreeSlot(final int size){
-        if(size>MAX_RECORD_SIZE) throw new IllegalArgumentException("too big record");
-        if(size<0) throw new IllegalArgumentException("negative size");
-
-        if(size<1535)
-            return size-1;
-        else if(size == MAX_RECORD_SIZE)
-            return NUMBER_OF_PHYS_FREE_SLOT-1;
-        else
-            return 1535 -1 + (size-1535)/64;
-    }
-
-    @Override
-    public void close() {
-        try{
-            lock.writeLock().lock();
-
-            phys.close();
-            index.close();
-            if(deleteFilesOnExit){
-                phys.deleteFile();
-                index.deleteFile();
-            }
-            phys = null;
-            index = null;
-
-        }finally {
-            lock.writeLock().unlock();
-        }
-    }
-
-    @Override
-    public boolean isClosed(){
-        return index == null;
-    }
-
-    protected  <A> A recordGet2(long indexValue, Volume data, Serializer<A> serializer) throws IOException {
-        final long dataPos = indexValue & PHYS_OFFSET_MASK;
-        final int dataSize = (int) (indexValue>>>48);
-        if(dataPos == 0) return serializer.deserialize(new DataInput2(new byte[0]),0);
-
-        if(dataSize<MAX_RECORD_SIZE){
-            //single record
-            DataInput2 in = data.getDataInput(dataPos, dataSize);
-            final A value = serializer.deserialize(in,dataSize);
-
-            if( in.pos != dataSize + (data.isSliced()?dataPos%Volume.BUF_SIZE:0))
-                throw new InternalError("Data were not fully read.");
-            return value;
-        }else{
-            //large linked record
-            ArrayList<DataInput2> ins = new ArrayList<DataInput2>();
-            ArrayList<Integer> sizes = new ArrayList<Integer>();
-            int recSize = 0;
-            long nextLink = indexValue;
-            while(nextLink!=0){
-                int currentSize = (int) (nextLink>>>48);
-                recSize+= currentSize-8;
-                DataInput2 in = data.getDataInput(nextLink & PHYS_OFFSET_MASK, currentSize);
-                nextLink = in.readLong();
-                ins.add(in);
-                sizes.add(currentSize - 8);
-            }
-            //construct byte[]
-            byte[] b = new byte[recSize];
-            int pos = 0;
-            for(int i=0;i<ins.size();i++){
-                DataInput2 in = ins.set(i,null);
-                int size = sizes.get(i);
-                in.readFully(b, pos, size);
-                pos+=size;
-            }
-            DataInput2 in = new DataInput2(b);
-            final A value = serializer.deserialize(in,recSize);
-
-            if( in.pos != recSize)
-                throw new InternalError("Data were not fully read.");
-            return value;
-        }
-    }
-
-
-
-    protected void freePhysRecPut(final long indexValue) throws IOException {
-        if((indexValue &PHYS_OFFSET_MASK)==0) throw new InternalError("zero indexValue: ");
-        final int size =  (int) (indexValue>>>48);
-
-        final long listRecid = RECID_FREE_PHYS_RECORDS_START + freePhysRecSize2FreeSlot(size);
-        longStackPut(listRecid, indexValue);
-    }
-
-    protected long findFreePhysSlot(int requiredSize) throws IOException {
-        int slot = freePhysRecSize2FreeSlot(requiredSize);
-        //check if this slot can contain smaller records,
-        if(requiredSize>1 && slot==freePhysRecSize2FreeSlot(requiredSize-1))
-            slot ++; //yes, in this case we have to start at next slot with bigger record and divide it
-
-        while(slot< NUMBER_OF_PHYS_FREE_SLOT){
-
-            final long v = longStackTake(RECID_FREE_PHYS_RECORDS_START +slot);
-            if(v!=0){
-                //we found it, check if we need to split record
-                final int foundRecSize = (int) (v>>>48);
-                if(foundRecSize!=requiredSize){
-
-                    //yes we need split
-                    final long newIndexValue =
-                            ((long)(foundRecSize - requiredSize)<<48) | //encode size into new free record
-                                    (v & PHYS_OFFSET_MASK) +   requiredSize; //and encode new free record phys offset
-                    freePhysRecPut(newIndexValue);
-                }
-
-                //return offset combined with required size
-                return (v & PHYS_OFFSET_MASK) |
-                        (((long)requiredSize)<<48);
-            }else{
-                slot++;
-            }
-        }
-        return 0;
-
-    }
-
-    @Override
-    public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer){
-        if(expectedOldValue == null||newValue==null||serializer==null) throw new NullPointerException();
-        if(recid<=0) throw new IllegalArgumentException("recid");
-        try{
-            lock.writeLock().lock();
-            Object oldVal = get(recid, serializer);
-            if((oldVal==null && expectedOldValue==null)|| (oldVal!=null && oldVal.equals(expectedOldValue))){
-                update(recid, newValue, serializer);
-                return true;
-            }else{
-                return false;
-            }
-        }finally{
-            lock.writeLock().unlock();
-        }
-
-    }
-
-
-
-    @Override
-    public boolean isReadOnly() {
-        return readOnly;
-    }
-
-
-    protected void unlinkPhysRecord(long indexVal, long recid) throws IOException {
-        int size = (int) (indexVal >>>48);
-        if(size==0) return;
-        if(size<MAX_RECORD_SIZE){
-            freePhysRecPut(indexVal);
-        }else{
-            while(indexVal!=0){
-                //traverse linked record
-                long nextIndexVal = phys.getLong(indexVal&PHYS_OFFSET_MASK);
-                freePhysRecPut(indexVal);
-                indexVal = nextIndexVal;
-            }
-        }
-
-    }
-
-    @Override
-    public void compact(){
-        if(readOnly) throw new IllegalAccessError();
-        if(index.getFile()==null) throw new UnsupportedOperationException("compact not supported for memory storage yet");
-        lock.writeLock().lock();
-        try{
-            //create secondary files for compaction
-            //TODO RAF
-            //TODO memory based stores
-            final File indexFile = index.getFile();
-            final File physFile = phys.getFile();
-            final boolean isRaf = index instanceof Volume.RandomAccessFileVol;
-            Volume.Factory fab = Volume.fileFactory(false, isRaf, new File(indexFile+".compact"));
-            StorageDirect store2 = new StorageDirect(fab);
-
-            //transfer stack of free recids
-            for(long recid =longStackTake(RECID_FREE_INDEX_SLOTS);
-                recid!=0; recid=longStackTake(RECID_FREE_INDEX_SLOTS)){
-                store2.longStackPut(recid, RECID_FREE_INDEX_SLOTS);
-            }
-
-            //iterate over recids and transfer physical records
-            final long indexSize = index.getLong(RECID_CURRENT_INDEX_FILE_SIZE*8)/8;
-
-
-            store2.lock.writeLock().lock();
-            for(long recid = INDEX_OFFSET_START; recid<indexSize;recid++){
-                //read data from first store
-                long physOffset = index.getLong(recid*8);
-                long physSize = physOffset >>> 48;
-                //TODO linked records larger then 64KB
-                physOffset = physOffset & PHYS_OFFSET_MASK;
-
-                //write index value into second storage
-                store2.index.ensureAvailable(recid*8+8);
-
-                //get free place in second store, and write data there
-                if(physSize!=0){
-                    DataInput2 in = phys.getDataInput(physOffset, (int)physSize);
-                    long physOffset2 =
-                            store2.freePhysRecTake((int)physSize) & PHYS_OFFSET_MASK;
-
-                    store2.phys.ensureAvailable((physOffset2 & PHYS_OFFSET_MASK)+physSize);
-                    synchronized (in.buf){
-                        //copy directly from buffer
-                        in.buf.limit((int) (in.pos+physSize));
-                        in.buf.position(in.pos);
-                        store2.phys.putData(physOffset2, in.buf);
-                    }
-                    store2.index.putLong(recid*8, (physSize<<48)|physOffset2);
-                }else{
-                    //just write zeroes
-                    store2.index.putLong(recid*8, 0);
-                }
-            }
-
-            store2.index.putLong(RECID_CURRENT_INDEX_FILE_SIZE*8, indexSize*8);
-
-            File indexFile2 = store2.index.getFile();
-            File physFile2 = store2.phys.getFile();
-            store2.lock.writeLock().unlock();
-            store2.close();
-
-            long time = System.currentTimeMillis();
-            File indexFile_ = new File(indexFile.getPath()+"_"+time+"_orig");
-            File physFile_ = new File(physFile.getPath()+"_"+time+"_orig");
-
-            index.close();
-            phys.close();
-            if(!indexFile.renameTo(indexFile_))throw new InternalError();
-            if(!physFile.renameTo(physFile_))throw new InternalError();
-
-            if(!indexFile2.renameTo(indexFile))throw new InternalError();
-            //TODO process may fail in middle of rename, analyze sequence and add recovery
-            if(!physFile2.renameTo(physFile))throw new InternalError();
-
-            indexFile_.delete();
-            physFile_.delete();
-
-            Volume.Factory fac2 = Volume.fileFactory(false, isRaf, indexFile);
-            index = fac2.createIndexVolume();
-            phys = fac2.createPhysVolume();
-
-        }catch(IOException e){
-            throw new IOError(e);
-        }finally {
-            lock.writeLock().unlock();
-        }
-    }
-
-
-
-
-}
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/StorageJournaled.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/StorageJournaled.java	(revision 29483)
+++ 	(revision )
@@ -1,713 +1,0 @@
-/*
- *  Copyright (c) 2012 Jan Kotek
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.mapdb;
-
-import java.io.IOError;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-
-/**
- * StorageDirect which provides transaction and journal.
- * Index file data are stored in memory+trans journal, phys file data are stored only in transaction journal.
- *
- * @author Jan Kotek
- */
-public class StorageJournaled extends StorageDirect implements Engine {
-
-    protected static final long WRITE_INDEX_LONG = 1L <<48;
-    protected static final long WRITE_INDEX_LONG_ZERO = 2L <<48;
-    protected static final long WRITE_PHYS_LONG = 3L <<48;
-    protected static final long WRITE_PHYS_ARRAY = 4L <<48;
-
-    protected static final long WRITE_SKIP_BUFFER = 444L <<48;
-    /** last instruction in log file */
-    protected static final long WRITE_SEAL = 111L <<48;
-    /** added to offset 8 into log file, indicates that write was successful*/
-    protected static final long LOG_SEAL = 4566556446554645L;
-    public static final String TRANS_LOG_FILE_EXT = ".t";
-
-
-    protected Volume transLog;
-    protected final Volume.Factory volFac;
-    protected long transLogOffset;
-
-
-    protected long indexSize;
-    protected long physSize;
-    protected final LongMap<long[]> recordLogRefs = new LongHashMap<long[]>();
-    protected final LongMap<Long> recordIndexVals = new LongHashMap<Long>();
-    protected final LongMap<long[]> longStackPages = new LongHashMap<long[]>();
-    protected final LongMap<ArrayList<Long>> transLinkedPhysRecods = new LongHashMap<ArrayList<Long>>();
-
-
-    public StorageJournaled(Volume.Factory volFac){
-        this(volFac, false, false, false, false);
-    }
-
-    public StorageJournaled(Volume.Factory volFac, boolean appendOnly,
-                            boolean deleteFilesOnExit, boolean failOnWrongHeader, boolean readOnly) {
-        super(volFac,  appendOnly, deleteFilesOnExit, failOnWrongHeader, readOnly);
-        lock.writeLock().lock();
-        try{
-            this.volFac = volFac;
-            this.transLog = volFac.createTransLogVolume();
-            reloadIndexFile();
-            replayLogFile();
-            transLog = null;
-        }finally{
-            lock.writeLock().unlock();
-        }
-    }
-
-
-    protected void reloadIndexFile() {
-        transLogOffset = 0;
-        writeLock_checkLocked();
-        recordLogRefs.clear();
-        recordIndexVals.clear();
-        longStackPages.clear();
-        transLinkedPhysRecods.clear();
-        indexSize = index.getLong(RECID_CURRENT_INDEX_FILE_SIZE *8);
-        physSize = index.getLong(RECID_CURRENT_PHYS_FILE_SIZE*8);
-        writeLock_checkLocked();
-    }
-
-    protected void openLogIfNeeded(){
-       if(transLog!=null) return;
-       transLog = volFac.createTransLogVolume();
-       transLog.ensureAvailable(16);
-       transLog.putLong(0, HEADER);
-       transLog.putLong(8, 0L);
-       transLogOffset = 16;
-    }
-
-
-
-
-
-    @Override
-    public <A> long put(A value, Serializer<A> serializer) {
-        if(value==null||serializer==null) throw new NullPointerException();
-        try{
-            DataOutput2 out = new DataOutput2();
-            serializer.serialize(out,value);
-
-            try{
-                lock.writeLock().lock();
-                //update index file, find free recid
-                long recid = longStackTake(RECID_FREE_INDEX_SLOTS);
-                if(recid == 0){
-                    //could not reuse recid, so create new one
-                    if(indexSize%8!=0) throw new InternalError();
-                    recid = indexSize/8;
-                    indexSize+=8;
-                }
-
-                if(out.pos<MAX_RECORD_SIZE){
-                    //get physical record
-                    // first 16 bites is record size, remaining 48 bytes is record offset in phys file
-                    final long indexValue = out.pos!=0?
-                        freePhysRecTake(out.pos):0L;
-                    writeIndexValToTransLog(recid, indexValue);
-
-                    //write new phys data into trans log
-                    writeOutToTransLog(out, recid, indexValue);
-                    checkBufferRounding();
-                }else{
-                    putLargeLinkedRecord(out, recid);
-                }
-
-
-
-                return recid-INDEX_OFFSET_START;
-            }finally {
-                lock.writeLock().unlock();
-            }
-        }catch(IOException e){
-            throw new IOError(e);
-        }
-    }
-
-    private void putLargeLinkedRecord(DataOutput2 out, long recid) throws IOException {
-        openLogIfNeeded();
-        //large size, needs to link multiple records together
-        //start splitting from end, so we can build up linked list
-        final int chunkSize = MAX_RECORD_SIZE-8;
-        int lastArrayPos = out.pos;
-        int arrayPos = out.pos - out.pos%chunkSize;
-        long lastChunkPhysId = 0;
-        ArrayList<Long> journalRefs = new ArrayList<Long>();
-        ArrayList<Long> physRecords = new ArrayList<Long>();
-        while(arrayPos>=0){
-            final int currentChunkSize = lastArrayPos-arrayPos;
-            byte[] b = new byte[currentChunkSize+8]; //TODO reuse byte[]
-            //append reference to prev physId
-            ByteBuffer.wrap(b).putLong(0, lastChunkPhysId);
-            //copy chunk
-            System.arraycopy(out.buf, arrayPos, b, 8, currentChunkSize);
-            //and write current chunk
-            lastChunkPhysId = freePhysRecTake(currentChunkSize+8);
-            physRecords.add(lastChunkPhysId);
-            //phys.putData(lastChunkPhysId&PHYS_OFFSET_MASK, b, b.length);
-
-            transLog.ensureAvailable(transLogOffset+10+currentChunkSize+8);
-            transLog.putLong(transLogOffset, WRITE_PHYS_ARRAY|(lastChunkPhysId&PHYS_OFFSET_MASK));
-            transLogOffset+=8;
-            transLog.putUnsignedShort(transLogOffset, currentChunkSize+8);
-            transLogOffset+=2;
-            final Long transLogReference = (((long)currentChunkSize)<<48)|(transLogOffset+8);
-            journalRefs.add(transLogReference);
-            transLog.putData(transLogOffset,b, b.length);
-            transLogOffset+=b.length;
-
-            checkBufferRounding();
-
-            lastArrayPos = arrayPos;
-            arrayPos-=chunkSize;
-        }
-        transLinkedPhysRecods.put(recid,physRecords);
-        writeIndexValToTransLog(recid, lastChunkPhysId);
-        long[] journalRefs2 = new long[journalRefs.size()];
-        for(int i=0;i<journalRefs2.length;i++){
-            journalRefs2[i] = journalRefs.get(i);
-        }
-        recordLogRefs.put(recid, journalRefs2);
-    }
-
-    protected void checkBufferRounding() throws IOException {
-        if(transLogOffset%Volume.BUF_SIZE > Volume.BUF_SIZE - MAX_RECORD_SIZE*2){
-            //position is to close to end of ByteBuffers (1GB)
-            //so start writing into new buffer
-            transLog.ensureAvailable(transLogOffset+8);
-            transLog.putLong(transLogOffset,WRITE_SKIP_BUFFER);
-            transLogOffset += Volume.BUF_SIZE-transLogOffset%Volume.BUF_SIZE;
-        }
-    }
-
-    protected void writeIndexValToTransLog(long recid, long indexValue) throws IOException {
-        //write new index value into transaction log
-        openLogIfNeeded();
-        transLog.ensureAvailable(transLogOffset+16);
-        transLog.putLong(transLogOffset, WRITE_INDEX_LONG | (recid * 8));
-        transLogOffset+=8;
-        transLog.putLong(transLogOffset, indexValue);
-        transLogOffset+=8;
-        recordIndexVals.put(recid,indexValue);
-    }
-
-    protected void writeOutToTransLog(DataOutput2 out, long recid, long indexValue) throws IOException {
-        openLogIfNeeded();
-        transLog.ensureAvailable(transLogOffset+10+out.pos);
-        transLog.putLong(transLogOffset, WRITE_PHYS_ARRAY|(indexValue&PHYS_OFFSET_MASK));
-        transLogOffset+=8;
-        transLog.putUnsignedShort(transLogOffset, out.pos);
-        transLogOffset+=2;
-        final long transLogReference = (((long)out.pos)<<48)|transLogOffset;
-        recordLogRefs.put(recid, new long[]{transLogReference}); //store reference to transaction log, so we can load data quickly
-        transLog.putData(transLogOffset,out.buf, out.pos);
-        transLogOffset+=out.pos;
-    }
-
-
-    @Override
-    public <A> A get(long recid, Serializer<A> serializer) {
-        if(serializer==null)throw new NullPointerException();
-        if(recid<=0) throw new IllegalArgumentException("recid");
-        recid+=INDEX_OFFSET_START;
-
-        try{
-            lock.readLock().lock();
-
-            long[] indexVals = recordLogRefs.get(recid);
-            if(indexVals!=null){
-                if(indexVals.length==1){
-                    //single record
-                    if(indexVals[0] == Long.MIN_VALUE)
-                        return null; //was deleted
-                    //record is in transaction log
-                    return recordGet2(indexVals[0], transLog, serializer);
-                }else{
-                    //read linked record from journal
-                    //first calculate total size
-                    int size = 0;
-                    for(long physId:indexVals) size+= physId>>>48;
-                    byte[] b = new byte[size];
-                    //now load it in chunks
-                    int pos = 0;
-                    for(long physId:indexVals){
-                        int curChunkSize = (int) (physId>>>48);
-                        long offset = physId&PHYS_OFFSET_MASK;
-                        DataInput2 in = transLog.getDataInput(offset, curChunkSize);
-                        in.readFully(b,pos,curChunkSize);
-                        pos+=curChunkSize;
-                    }
-                    if(size!=pos) throw new InternalError();
-                    //now deserialize
-                    DataInput2 in = new DataInput2(b);
-                    A ret = serializer.deserialize(in, size);
-                    if(in.pos!=size) throw new InternalError("Data were not fully read");
-                    return ret;
-                }
-            }else{
-                //not in transaction log, read from file
-                final long indexValue = index.getLong(recid*8) ;
-                 return recordGet2(indexValue, phys, serializer);
-            }
-        }catch(IOException e){
-            throw new IOError(e);
-        }finally{
-            lock.readLock().unlock();
-        }
-    }
-
-    @Override
-    public <A> void update(long recid, A value, Serializer<A> serializer) {
-        if(value==null||serializer==null) throw new NullPointerException();
-        if(recid<=0) throw new IllegalArgumentException("recid");
-        recid+=INDEX_OFFSET_START;
-
-        try{
-            DataOutput2 out = new DataOutput2();
-            serializer.serialize(out,value);
-
-            try{
-                lock.writeLock().lock();
-
-                //check if size has changed
-                long oldIndexVal = getIndexLong(recid);
-                long oldSize = oldIndexVal>>>48;
-
-                //check if we need to split new records into multiple one
-                if(out.pos<MAX_RECORD_SIZE){
-                    if(oldSize == 0 && out.pos==0){
-                        //do nothing
-                    } else if(oldSize == out.pos ){
-                        //size is the same, so just write new data
-                        writeOutToTransLog(out, recid, oldIndexVal);
-                    }else if(oldSize != 0 && out.pos==0){
-                        //new record has zero size, just delete old phys one
-                        freePhysRecPut(oldIndexVal);
-                        writeIndexValToTransLog(recid, 0L);
-                    }else{
-                        //size has changed, so write into new location
-                        final long newIndexValue = freePhysRecTake(out.pos);
-
-                        writeOutToTransLog(out, recid, newIndexValue);
-                        //update index file with new location
-                        writeIndexValToTransLog(recid, newIndexValue);
-
-                        //and set old phys record as free
-                        unlinkPhysRecord(oldIndexVal,recid);
-                    }
-                }else{
-                    unlinkPhysRecord(oldIndexVal,recid); //unlink must be first to release currently used space
-                    putLargeLinkedRecord(out, recid);
-                }
-
-
-                checkBufferRounding();
-            }finally {
-                lock.writeLock().unlock();
-            }
-        }catch(IOException e){
-            throw new IOError(e);
-        }
-
-    }
-
-    private long getIndexLong(long recid) {
-        Long v = recordIndexVals.get(recid);
-        return (v!=null) ? v :
-             index.getLong(recid * 8);
-    }
-
-    @Override
-    public <A> void delete(long recid, Serializer<A>  serializer){
-        if(serializer==null) throw new NullPointerException();
-        if(recid<=0) throw new IllegalArgumentException("recid");
-        recid+=INDEX_OFFSET_START;
-
-        try{
-            lock.writeLock().lock();
-            openLogIfNeeded();
-
-            transLog.ensureAvailable(transLogOffset+8);
-            transLog.putLong(transLogOffset, WRITE_INDEX_LONG_ZERO | (recid*8));
-            transLogOffset+=8;
-            longStackPut(RECID_FREE_INDEX_SLOTS,recid);
-            recordLogRefs.put(recid, new long[]{Long.MIN_VALUE});
-            //check if is in transaction
-            long oldIndexVal = getIndexLong(recid);
-            recordIndexVals.put(recid,0L);
-            unlinkPhysRecord(oldIndexVal,recid);
-
-
-            checkBufferRounding();
-
-        }catch(IOException e){
-            throw new IOError(e);
-        }finally {
-            lock.writeLock().unlock();
-        }
-    }
-
-
-    @Override
-    public void close() {
-        super.close();
-
-        if(transLog!=null){
-             transLog.sync();
-             transLog.close();
-             if(deleteFilesOnExit){
-                transLog.deleteFile();
-            }
-        }
-
-        transLog = null;
-        //TODO delete trans log logic
-    }
-
-    @Override
-    public void commit() {
-        try{
-            lock.writeLock().lock();
-
-            //dump long stack pages
-            LongMap.LongMapIterator<long[]> iter = longStackPages.longMapIterator();
-            while(iter.moveToNext()){
-                transLog.ensureAvailable(transLogOffset+8+2+LONG_STACK_PAGE_SIZE);
-                transLog.putLong(transLogOffset, WRITE_PHYS_ARRAY|iter.key());
-                transLogOffset+=8;
-                transLog.putUnsignedShort(transLogOffset, LONG_STACK_PAGE_SIZE);
-                transLogOffset+=2;
-                for(long l:iter.value()){
-                    transLog.putLong(transLogOffset, l);
-                    transLogOffset+=8;
-                }
-                checkBufferRounding();
-            }
-
-            //update physical and logical filesize
-            writeIndexValToTransLog(RECID_CURRENT_PHYS_FILE_SIZE, physSize);
-            writeIndexValToTransLog(RECID_CURRENT_INDEX_FILE_SIZE, indexSize);
-
-
-            //seal log file
-            transLog.ensureAvailable(transLogOffset+8);
-            transLog.putLong(transLogOffset, WRITE_SEAL);
-            transLogOffset+=8;
-            //flush log file
-            transLog.sync();
-            //and write mark it was sealed
-            transLog.putLong(8, LOG_SEAL);
-            transLog.sync();
-
-            replayLogFile();
-            reloadIndexFile();
-
-        }catch(IOException e){
-            throw new IOError(e);
-        }finally{
-            lock.writeLock().unlock();
-        }
-    }
-
-    protected void replayLogFile(){
-
-            writeLock_checkLocked();
-            transLogOffset = 0;
-
-            if(transLog!=null){
-                transLog.sync();
-            }
-
-
-            //read headers
-            if(transLog.isEmpty() || transLog.getLong(0)!=HEADER || transLog.getLong(8) !=LOG_SEAL){
-                //wrong headers, discard log
-                transLog.close();
-                transLog.deleteFile();
-                transLog = null;
-                return;
-            }
-
-
-            //all good, start replay
-            transLogOffset=16;
-            long ins = transLog.getLong(transLogOffset);
-            transLogOffset+=8;
-
-            while(ins!=WRITE_SEAL && ins!=0){
-
-                final long offset = ins&PHYS_OFFSET_MASK;
-                ins -=offset;
-
-                if(ins == WRITE_INDEX_LONG_ZERO){
-                    index.ensureAvailable(offset+8);
-                    index.putLong(offset, 0L);
-                }else if(ins == WRITE_INDEX_LONG){
-                    final long value = transLog.getLong(transLogOffset);
-                    transLogOffset+=8;
-                    index.ensureAvailable(offset+8);
-                    index.putLong(offset, value);
-                }else if(ins == WRITE_PHYS_LONG){
-                    final long value = transLog.getLong(transLogOffset);
-                    transLogOffset+=8;
-                    phys.ensureAvailable(offset+8);
-                    phys.putLong(offset, value);
-                }else if(ins == WRITE_PHYS_ARRAY){
-                    final int size = transLog.getUnsignedShort(transLogOffset);
-                    transLogOffset+=2;
-                    //transfer byte[] directly from log file without copying into memory
-                    DataInput2 input = transLog.getDataInput(transLogOffset, size);
-                    synchronized (input.buf){
-                        input.buf.position(input.pos);
-                        input.buf.limit(input.pos+size);
-                        phys.ensureAvailable(offset+size);
-                        phys.putData(offset, input.buf);
-                        input.buf.clear();
-                    }
-                    transLogOffset+=size;
-                }else if(ins == WRITE_SKIP_BUFFER){
-                    transLogOffset += Volume.BUF_SIZE-transLogOffset%Volume.BUF_SIZE;
-                }else{
-                    throw new InternalError("unknown trans log instruction: "+(ins>>>48));
-                }
-
-                ins = transLog.getLong(transLogOffset);
-                transLogOffset+=8;
-            }
-            transLogOffset=0;
-
-            //flush dbs
-            phys.sync();
-            index.sync();
-            //and discard log
-            transLog.putLong(0, 0);
-            transLog.putLong(8, 0); //destroy seal to prevent log file from being replayed
-            transLog.close();
-            transLog.deleteFile();
-            transLog = null;
-    }
-
-
-    @Override
-    public void rollback() {
-        lock.writeLock().lock();
-        try{
-        //discard trans log
-        if(transLog!=null){
-            transLog.close();
-            transLog.deleteFile();
-            transLog = null;
-        }
-
-        reloadIndexFile();
-        }finally{
-            lock.writeLock().unlock();
-        }
-
-    }
-
-    @Override
-    public void compact() {
-        lock.writeLock().lock();
-        try{
-            if(transLog!=null && !transLog.isEmpty())
-                throw new IllegalAccessError("Journal not empty; commit first, than compact");
-            super.compact();
-        }finally {
-            lock.writeLock().unlock();
-        }
-    }
-
-
-    private long[] getLongStackPage(final long physOffset, boolean read){
-        long[] buf = longStackPages.get(physOffset);
-        if(buf == null){
-            buf = new long[LONG_STACK_NUM_OF_RECORDS_PER_PAGE+1];
-            if(read)
-                for(int i=0;i<buf.length;i++){
-                    buf[i] = phys.getLong(physOffset+i*8);
-                }
-            longStackPages.put(physOffset,buf);
-        }
-        return buf;
-    }
-
-    @Override
-    protected long longStackTake(final long listRecid) throws IOException {
-        final long dataOffset = getIndexLong(listRecid) & PHYS_OFFSET_MASK;
-        if(dataOffset == 0)
-            return 0; //there is no such list, so just return 0
-
-        writeLock_checkLocked();
-
-        long[] buf = getLongStackPage(dataOffset,true);
-
-        final int numberOfRecordsInPage = (int) (buf[0]>>>(8*7));
-
-
-        if(numberOfRecordsInPage<=0)
-            throw new InternalError();
-        if(numberOfRecordsInPage>LONG_STACK_NUM_OF_RECORDS_PER_PAGE) throw new InternalError();
-
-        final long ret = buf[numberOfRecordsInPage];
-
-        final long previousListPhysid = buf[0] & PHYS_OFFSET_MASK;
-
-        //was it only record at that page?
-        if(numberOfRecordsInPage == 1){
-            //yes, delete this page
-            long value = previousListPhysid !=0 ?
-                    previousListPhysid | (((long) LONG_STACK_PAGE_SIZE) << 48) :
-                    0L;
-            //update index so it points to previous (or none)
-            writeIndexValToTransLog(listRecid, value);
-
-            //put space used by this page into free list
-            longStackPages.remove(dataOffset); //TODO write zeroes to phys file
-            freePhysRecPut(dataOffset | (((long)LONG_STACK_PAGE_SIZE)<<48));
-        }else{
-            //no, it was not last record at this page, so just decrement the counter
-            buf[0] = previousListPhysid | ((1L*numberOfRecordsInPage-1L)<<(8*7));
-        }
-        return ret;
-
-    }
-
-    @Override
-    protected void longStackPut(final long listRecid, final long offset) throws IOException {
-        writeLock_checkLocked();
-
-        //index position was cleared, put into free index list
-        final long listPhysid2 =getIndexLong(listRecid) & PHYS_OFFSET_MASK;
-
-        if(listPhysid2 == 0){ //empty list?
-            //yes empty, create new page and fill it with values
-            final long listPhysid = freePhysRecTake(LONG_STACK_PAGE_SIZE) &PHYS_OFFSET_MASK;
-            long[] buf = getLongStackPage(listPhysid,false);
-            if(listPhysid == 0) throw new InternalError();
-            //set number of free records in this page to 1
-            buf[0] = 1L<<(8*7);
-            //set  record
-            buf[1] = offset;
-            //and update index file with new page location
-            writeIndexValToTransLog(listRecid, (((long) LONG_STACK_PAGE_SIZE) << 48) | listPhysid);
-        }else{
-            long[] buf = getLongStackPage(listPhysid2,true);
-            final int numberOfRecordsInPage = (int) (buf[0]>>>(8*7));
-            if(numberOfRecordsInPage == LONG_STACK_NUM_OF_RECORDS_PER_PAGE){ //is current page full?
-                //yes it is full, so we need to allocate new page and write our number there
-                final long listPhysid = freePhysRecTake(LONG_STACK_PAGE_SIZE) &PHYS_OFFSET_MASK;
-                long[] bufNew = getLongStackPage(listPhysid,false);
-                if(listPhysid == 0) throw new InternalError();
-                //final ByteBuffers dataBuf = dataBufs[((int) (listPhysid / BUF_SIZE))];
-                //set location to previous page
-                //set number of free records in this page to 1
-                bufNew[0] = listPhysid2 | (1L<<(8*7));
-                //set free record
-                bufNew[1] = offset;
-                //and update index file with new page location
-                writeIndexValToTransLog(listRecid,(((long) LONG_STACK_PAGE_SIZE) << 48) | listPhysid);
-            }else{
-                //there is space on page, so just write released recid and increase the counter
-                buf[1+numberOfRecordsInPage] = offset;
-                buf[0] = (buf[0]&PHYS_OFFSET_MASK) | ((1L*numberOfRecordsInPage+1L)<<(8*7));
-            }
-        }
-    }
-
-
-
-    @Override
-	protected long freePhysRecTake(final int requiredSize) throws IOException {
-        writeLock_checkLocked();
-
-        if(requiredSize<=0) throw new InternalError();
-
-        long freePhysRec = appendOnly? 0L:
-                findFreePhysSlot(requiredSize);
-        if(freePhysRec!=0){
-            return freePhysRec;
-        }
-
-        //No free records found, so lets increase the file size.
-        //We need to take case of growing ByteBuffers.
-        // Also max size of ByteBuffers is 2GB, so we need to use multiple ones
-
-        final long oldFileSize = physSize;
-        if(oldFileSize <=0) throw new InternalError("illegal file size:"+oldFileSize);
-
-        //check if new record would be overflowing BUF_SIZE
-        if(oldFileSize%Volume.BUF_SIZE+requiredSize<=Volume.BUF_SIZE){
-            //no, so just increase file size
-            physSize+=requiredSize;
-            //so just increase buffer size
-
-            //and return this
-            return (((long)requiredSize)<<48) | oldFileSize;
-        }else{
-            //new size is overlapping 2GB ByteBuffers size
-            //so we need to create empty record for 'padding' size to 2GB
-
-            final long  freeSizeToCreate = Volume.BUF_SIZE -  oldFileSize%Volume.BUF_SIZE;
-            if(freeSizeToCreate == 0) throw new InternalError();
-
-            final long nextBufferStartOffset = oldFileSize + freeSizeToCreate;
-            if(nextBufferStartOffset%Volume.BUF_SIZE!=0) throw new InternalError();
-
-            //increase the disk size
-            physSize += freeSizeToCreate + requiredSize;
-
-            //mark 'padding' free record
-            freePhysRecPut((freeSizeToCreate<<48)|oldFileSize);
-
-            //and finally return position at beginning of new buffer
-            return (((long)requiredSize)<<48) | nextBufferStartOffset;
-        }
-
-    }
-
-    @Override
-    protected void unlinkPhysRecord(long indexVal, long recid) throws IOException {
-        if(indexVal == 0) return;
-
-        ArrayList<Long> linkedInTrans = transLinkedPhysRecods.remove(recid);
-        if(linkedInTrans!=null){
-            for(Long l:linkedInTrans){
-                freePhysRecPut(l);
-            }
-            return;
-        }
-
-        if((indexVal>>>48)<MAX_RECORD_SIZE){  //check size
-            //single record
-            freePhysRecPut(indexVal);
-            return;
-        }
-
-        while(indexVal!=0){
-            freePhysRecPut(indexVal);
-            final long offset = indexVal & PHYS_OFFSET_MASK;
-            indexVal = phys.getLong(offset); //read next value
-        }
-    }
-
-}
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/StoreAppend.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/StoreAppend.java	(revision 29484)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/StoreAppend.java	(revision 29484)
@@ -0,0 +1,435 @@
+package org.mapdb;
+
+import java.io.File;
+import java.io.IOError;
+import java.io.IOException;
+import java.util.BitSet;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Append only storage. Uses different file format than Direct and Journaled storage
+ */
+public class StoreAppend implements Engine{
+
+    protected final File file;
+    protected final boolean useRandomAccessFile;
+    protected final boolean readOnly;
+
+    protected final static long FILE_NUMBER_SHIFT = 28;
+    protected final static long FILE_OFFSET_MASK = 0x0FFFFFFFL;
+
+    protected final static long FILE_HEADER = 56465465456465L;
+
+    protected static final int CONCURRENCY_FACTOR = 32;
+    protected final ReentrantReadWriteLock[] readLocks;
+    protected final Lock structuralLock = new ReentrantLock();
+    protected final static Long THUMBSTONE = Long.MIN_VALUE;
+    protected final static int THUMBSTONE_SIZE = -3;
+    protected final static long EOF = -1;
+    protected final static long COMMIT = -2;
+    protected final static long ROLLBACK = -2;
+
+    volatile protected Volume currentVolume;
+    volatile protected long currentVolumeNum;
+    volatile protected int currentFileOffset;
+    volatile protected long maxRecid;
+
+    protected LongConcurrentHashMap<Volume> volumes = new LongConcurrentHashMap<Volume>();
+    protected final LongConcurrentHashMap<Long> recidsInTx = new LongConcurrentHashMap<Long>();
+
+
+    protected final Volume recidsTable = new Volume.MemoryVol(true);
+    protected static final int MAX_FILE_SIZE = 1024 * 1024 * 10;
+
+    public StoreAppend(File file, boolean useRandomAccessFile, boolean readOnly, boolean transactionsDisabled) {
+        this.file = file;
+        this.useRandomAccessFile = useRandomAccessFile;
+        this.readOnly = readOnly;
+        //TODO special mode with transactions disabled
+
+        readLocks = new ReentrantReadWriteLock[CONCURRENCY_FACTOR];
+        for(int i=0;i<readLocks.length;i++) readLocks[i] = new ReentrantReadWriteLock();
+
+        File zeroFile = getFileNum(0);
+        if(zeroFile.exists()){
+            replayLog();
+        }else{
+            //create zero file
+            recidsTable.ensureAvailable(LAST_RESERVED_RECID*8+8);
+            for(long i=0;i<=LAST_RESERVED_RECID;i++){
+                recidsTable.putLong(i*8,0);
+            }
+            maxRecid=LAST_RESERVED_RECID+1;
+            currentVolume = Volume.volumeForFile(zeroFile, useRandomAccessFile, readOnly);
+            currentVolume.ensureAvailable(8);
+            currentVolume.putLong(0, FILE_HEADER);
+            currentFileOffset = 8;
+            volumes.put(0L, currentVolume);
+        }
+
+
+
+
+    }
+
+    protected void replayLog() {
+        try{
+        for(long fileNum=0;;fileNum++){
+            File f = getFileNum(fileNum);
+            if(!f.exists()) return;
+            currentVolume = Volume.volumeForFile(f, useRandomAccessFile, readOnly);
+            volumes.put(fileNum, currentVolume);
+            currentVolumeNum = fileNum;
+
+            //replay file and rebuild recid index table
+            LongHashMap<Long> recidsTable2 = new LongHashMap<Long>();
+            if(!currentVolume.isEmpty()){
+                currentFileOffset =0;
+                long header = currentVolume.getLong(currentFileOffset); currentFileOffset+=8;
+                if(header!=FILE_HEADER) throw new InternalError();
+
+                for(;;){
+                    long recid = currentVolume.getLong(currentFileOffset); currentFileOffset+=8;
+                    maxRecid = Math.max(recid, maxRecid);
+
+                    if(recid == EOF || recid == 0){
+                        break; //end of file
+                    }else if(recid == COMMIT){
+                        //move stuff from temporary table to currently used
+                        commitRecids(recidsTable2);
+                        continue;
+                    }else if(recid == ROLLBACK){
+                        //do not use last recids
+                        recidsTable2.clear();
+                        continue;
+                    }
+
+                    long filePos = (fileNum<<FILE_NUMBER_SHIFT) | currentFileOffset;
+                    int size = currentVolume.getInt(currentFileOffset); currentFileOffset+=4;
+                    if(size!=THUMBSTONE_SIZE){
+                        //skip data
+                        currentFileOffset+=size;
+                        //store location within the log files in memory
+                        recidsTable2.put(recid, filePos);
+                    }else{
+                        //record was deleted (THUMBSTONE mark)
+                        recidsTable2.put(recid, THUMBSTONE);
+                    }
+                }
+
+            }
+        }
+        }catch(IOError e){
+            //TODO error is part of workflow, but maybe change workflow?
+        }
+    }
+
+    protected File getFileNum(long fileNum) {
+        return new File(file.getPath()+"."+fileNum);
+    }
+
+
+    protected void commitRecids(LongMap<Long> recidsTable2) {
+        LongMap.LongMapIterator<Long> iter = recidsTable2.longMapIterator();
+        while(iter.moveToNext()){
+            long recidsTableOffset = iter.key()*8;
+            recidsTable.ensureAvailable(recidsTableOffset+8);
+            recidsTable.putLong(recidsTableOffset, iter.value());
+        }
+        recidsTable2.clear();
+    }
+
+
+    @Override
+    public <A> long put(A value, Serializer<A> serializer) {
+
+        DataOutput2 out = Utils.serializer(serializer,value);
+
+        long recid;
+        long pos;
+        long volNum;
+        Volume vol;
+        structuralLock.lock();
+        try{
+            recid= maxRecid++; //TODO free recid management
+            pos = currentFileOffset;
+            currentFileOffset += 8 + 4 + out.pos;
+            currentVolume.ensureAvailable(currentFileOffset);
+            volNum = currentVolumeNum;
+            vol = currentVolume;
+            rollOverFile();
+        }finally {
+            structuralLock.unlock();
+        }
+        Lock lock = readLocks[Utils.longHash(recid)%readLocks.length].writeLock();
+        lock.lock();
+        try{
+            vol.putLong(pos, recid);
+            pos+=8;
+            long filePos = (volNum<<FILE_NUMBER_SHIFT) | pos;
+            vol.putInt(pos,out.pos);
+            pos+=4;
+            vol.putData(pos,out.buf, 0, out.pos);
+            recidsInTx.put(recid, filePos);
+
+            return recid;
+        }finally {
+            lock.unlock();
+        }
+    }
+
+    @Override
+    public <A> A get(long recid, Serializer<A> serializer) {
+        Lock lock = readLocks[Utils.longHash(recid)%readLocks.length].readLock();
+        lock.lock();
+        try {
+            return getNoLock(recid, serializer);
+        } catch (IOException e) {
+            throw new IOError(e);
+        }finally {
+            lock.unlock();
+        }
+
+    }
+
+    protected <A> A getNoLock(long recid, Serializer<A> serializer) throws IOException {
+        Long fileNum2 = recidsInTx.get(recid);
+        if(fileNum2 == null){
+                recidsTable.ensureAvailable(recid*8+8);
+                fileNum2 = recidsTable.getLong(recid*8);
+        }
+
+        if(fileNum2 == THUMBSTONE){  //there is warning about '==', it is ok
+            //record was deleted;
+            return null;
+        }
+
+        if(fileNum2 == 0){
+            return serializer.deserialize(new DataInput2(new byte[0]), 0);
+        }
+
+        long fileNum = fileNum2;
+
+        long fileOffset = fileNum & FILE_OFFSET_MASK;
+        if(fileOffset>MAX_FILE_SIZE) throw new InternalError();
+        fileNum = fileNum>>>FILE_NUMBER_SHIFT;
+        Volume v = volumes.get(fileNum);
+
+        int size = v.getInt(fileOffset);
+        DataInput2 input = v.getDataInput(fileOffset+4, size);
+
+        return serializer.deserialize(input, size);
+    }
+
+    @Override
+    public <A> void update(long recid, A value, Serializer<A> serializer) {
+        DataOutput2 out = Utils.serializer(serializer,value);
+        Lock lock = readLocks[Utils.longHash(recid)%readLocks.length].writeLock();
+        lock.lock();
+        try{
+
+            long pos;
+            long volNum;
+            Volume vol;
+            structuralLock.lock();
+            try{
+                pos = currentFileOffset;
+                currentFileOffset += 8 + 4 + out.pos;
+                currentVolume.ensureAvailable(currentFileOffset);
+                volNum = currentVolumeNum;
+                vol = currentVolume;
+                rollOverFile();
+            }finally {
+                structuralLock.unlock();
+            }
+            vol.putLong(pos, recid);
+            pos+=8;
+            long filePos = (volNum<<FILE_NUMBER_SHIFT) | pos;
+            vol.putInt(pos,out.pos);
+            pos+=4;
+            vol.putData(pos,out.buf, 0, out.pos);
+            recidsInTx.put(recid, filePos);
+        }finally {
+            lock.unlock();
+        }
+    }
+
+    @Override
+    public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer) {
+        Lock lock = readLocks[Utils.longHash(recid)%readLocks.length].writeLock();
+        lock.lock();
+        try{
+            Object oldVal = get(recid, serializer);
+            //TODO compare binary stuff?
+            if(!((oldVal==null && expectedOldValue==null)|| (oldVal!=null && oldVal.equals(expectedOldValue)))){
+                return false;
+            }
+
+            DataOutput2 out = Utils.serializer(serializer,newValue);
+
+            long pos;
+            long volNum;
+            Volume vol;
+            structuralLock.lock();
+            try{
+                pos = currentFileOffset;
+                currentFileOffset += 8 + 4 + out.pos;
+                currentVolume.ensureAvailable(currentFileOffset);
+                volNum = currentVolumeNum;
+                vol = currentVolume;
+                rollOverFile();
+            }finally {
+                structuralLock.unlock();
+            }
+            vol.putLong(pos, recid);
+            pos+=8;
+            long filePos = (volNum<<FILE_NUMBER_SHIFT) | pos;
+            vol.putInt(pos,out.pos);
+            pos+=4;
+            vol.putData(pos,out.buf, 0, out.pos);
+            recidsInTx.put(recid, filePos);
+            return true;
+        }finally {
+            lock.unlock();
+        }
+    }
+
+    @Override
+    public <A> void delete(long recid, Serializer<A> serializer){
+        Lock lock = readLocks[Utils.longHash(recid)%readLocks.length].writeLock();
+        lock.lock();
+        try{
+            structuralLock.lock();
+            try{
+                currentVolume.ensureAvailable(currentFileOffset+8+4);
+                currentVolume.putLong(currentFileOffset, recid);
+                currentFileOffset+=8;
+                currentVolume.putInt(currentFileOffset, THUMBSTONE_SIZE);
+                currentFileOffset+=4;
+                recidsInTx.put(recid, THUMBSTONE);
+                rollOverFile();
+
+            }finally{
+                structuralLock.unlock();
+            }
+
+        }finally {
+            lock.unlock();
+        }
+
+    }
+
+    @Override
+    public void close() {
+        structuralLock.lock();
+        currentVolume.sync();
+        currentVolume.close();
+        currentVolume = null;
+        volumes = null;
+        structuralLock.unlock();
+    }
+
+    @Override
+    public boolean isClosed() {
+        return volumes==null;
+    }
+
+    @Override
+    public void commit() {
+        //TODO lock all locks?
+        //append commit mark
+        structuralLock.lock();
+        try{
+            commitRecids(recidsInTx);
+            currentVolume.ensureAvailable(currentFileOffset+8);
+            currentVolume.putLong(currentFileOffset, COMMIT);
+            currentFileOffset+=8;
+            currentVolume.sync();
+            rollOverFile();
+        }finally {
+            structuralLock.unlock();
+        }
+    }
+
+    @Override
+    public void rollback() throws UnsupportedOperationException {
+        //TODO lock all locks?
+        //append rollback mark
+        structuralLock.lock();
+        try{
+            currentVolume.ensureAvailable(currentFileOffset+8);
+            currentVolume.putLong(currentFileOffset, ROLLBACK);
+            currentFileOffset+=8;
+            currentVolume.sync();
+            recidsInTx.clear();
+            rollOverFile();
+        }finally {
+            structuralLock.unlock();
+        }
+
+
+    }
+
+
+    /** check if current file is too big, if yes finish it and start next file */
+    protected void rollOverFile(){
+        if(currentFileOffset<MAX_FILE_SIZE-8) return;
+
+        currentVolume.ensureAvailable(currentFileOffset + 8);
+        currentVolume.putLong(currentFileOffset, EOF);
+        currentVolume.sync();
+        currentVolumeNum++;
+        currentVolume = Volume.volumeForFile(
+                getFileNum(currentVolumeNum), useRandomAccessFile, readOnly);
+        currentVolume.ensureAvailable(MAX_FILE_SIZE);
+        currentVolume.putLong(0, FILE_HEADER);
+        currentFileOffset = 8;
+        currentVolume.sync();
+        volumes.put(currentVolumeNum,currentVolume);
+    }
+
+
+    @Override
+    public boolean isReadOnly() {
+        return readOnly;
+    }
+
+    @Override
+    public void compact() {
+        //traverse list of recids, find and delete files which are not used
+        //TODO lock all locks?
+        structuralLock.lock();
+        try{
+            if(!recidsInTx.isEmpty()) throw new IllegalAccessError("Uncommited changes");
+
+            LongHashMap<Boolean> ff = new LongHashMap<Boolean>();
+            for(long recid=0;recid<maxRecid;recid++){
+                long indexVal = recidsTable.getLong(recid*8);
+                if(indexVal ==0)continue;
+                long fileNum = indexVal>>>FILE_NUMBER_SHIFT;
+                ff.put(fileNum,true);
+            }
+
+            //now traverse files and delete unused
+            LongMap.LongMapIterator<Volume> iter = volumes.longMapIterator();
+            while(iter.moveToNext()){
+                long recid = iter.key();
+                if(ff.get(recid)!=null) continue;
+                Volume v = iter.value();
+                v.close();
+                v.deleteFile();
+                iter.remove();
+            }
+
+        }finally {
+            structuralLock.unlock();
+        }
+
+
+    }
+
+}
+
+
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/StoreDirect.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/StoreDirect.java	(revision 29484)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/StoreDirect.java	(revision 29484)
@@ -0,0 +1,652 @@
+/*
+ *  Copyright (c) 2012 Jan Kotek
+ *
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.mapdb;
+
+import java.io.File;
+import java.io.IOError;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Storage Engine which saves record directly into file.
+ * Is used when transaction journal is disabled.
+ *
+ * @author Jan Kotek
+ */
+public class StoreDirect implements Engine{
+
+    protected static final long MASK_OFFSET = 0x0000FFFFFFFFFFFFL;
+
+    protected static final long MASK_SIZE = 0x7fff000000000000L;
+    protected static final long MASK_IS_LINKED = 0x8000000000000000L;
+
+    protected static final long HEADER = 9032094932889042394L;
+
+    /** maximal non linked record size */
+    protected static final int MAX_REC_SIZE = 32767;
+
+    /** number of free physical slots */
+    protected static final int PHYS_FREE_SLOTS_COUNT = 2048;
+
+    /** index file offset where current size of index file is stored*/
+    protected static final int IO_INDEX_SIZE = 1*8;
+    /** index file offset where current size of phys file is stored */
+    protected static final int IO_PHYS_SIZE = 2*8;
+    /** index file offset where reference to longstack of free recid is stored*/
+    protected static final int IO_FREE_RECID = 15*8;
+
+    /** index file offset where first recid available to user is stored */
+    protected static final int IO_USER_START = IO_FREE_RECID+PHYS_FREE_SLOTS_COUNT*8+8;
+
+    public static final String DATA_FILE_EXT = ".p";
+
+
+    static final int LONG_STACK_PER_PAGE = 204;
+
+    static final int LONG_STACK_PAGE_SIZE =   8 + LONG_STACK_PER_PAGE * 6;
+
+
+    protected final ReentrantReadWriteLock[] locks = Utils.newReadWriteLocks(32);
+    protected final ReentrantLock structuralLock = new ReentrantLock();
+
+    protected Volume index;
+    protected Volume phys;
+
+    protected long physSize;
+    protected long indexSize;
+
+    protected final boolean deleteFilesAfterClose;
+
+    protected final boolean readOnly;
+
+    public StoreDirect(Volume.Factory volFac, boolean readOnly, boolean deleteFilesAfterClose) {
+        this.readOnly = readOnly;
+        this.deleteFilesAfterClose = deleteFilesAfterClose;
+
+
+        index = volFac.createIndexVolume();
+        phys = volFac.createPhysVolume();
+        if(index.isEmpty()){
+            createStructure();
+        }else{
+            checkHeaders();
+            indexSize = index.getLong(IO_INDEX_SIZE);
+            physSize = index.getLong(IO_PHYS_SIZE);
+        }
+
+    }
+
+    public StoreDirect(Volume.Factory volFac) {
+        this(volFac, false,false);
+    }
+
+    protected void checkHeaders() {
+        if(index.getLong(0)!=HEADER||phys.getLong(0)!=HEADER)throw new IOError(new IOException("storage has invalid header"));
+    }
+
+    protected void createStructure() {
+        indexSize = IO_USER_START+Engine.LAST_RESERVED_RECID*8+8;
+        index.ensureAvailable(indexSize);
+        for(int i=0;i<indexSize;i+=8) index.putLong(i,0L);
+        index.putLong(0, HEADER);
+        index.putLong(IO_INDEX_SIZE,indexSize);
+        physSize =16;
+        phys.ensureAvailable(physSize);
+        phys.putLong(0, HEADER);
+        index.putLong(IO_PHYS_SIZE,physSize);
+    }
+
+
+    @Override
+    public <A> long put(A value, Serializer<A> serializer) {
+        DataOutput2 out = serialize(value, serializer);
+
+        structuralLock.lock();
+        final long ioRecid;
+        final long[] indexVals;
+        try{
+            ioRecid = freeIoRecidTake(true) ;
+            indexVals = physAllocate(out.pos,true);
+        }finally {
+            structuralLock.unlock();
+        }
+
+        put2(out, ioRecid, indexVals);
+
+        return (ioRecid-IO_USER_START)/8;
+    }
+
+    private void put2(DataOutput2 out, long ioRecid, long[] indexVals) {
+        index.putLong(ioRecid, indexVals[0]);
+        //write stuff
+        if(indexVals.length==1||indexVals[1]==0){ //is more then one? ie linked
+            //write single
+
+            phys.putData(indexVals[0]&MASK_OFFSET, out.buf, 0, out.pos);
+
+        }else{
+            int outPos = 0;
+            //write linked
+            for(int i=0;i<indexVals.length;i++){
+                final int c = ccc(indexVals.length, i);
+                final long indexVal = indexVals[i];
+                final boolean isLast = (indexVal & MASK_IS_LINKED) ==0;
+                if(isLast!=(i==indexVals.length-1)) throw new InternalError();
+                final int size = (int) ((indexVal& MASK_SIZE)>>48);
+                final long offset = indexVal&MASK_OFFSET;
+
+                //write data
+                phys.putData(offset+c,out.buf,outPos, size-c);
+                outPos+=size-c;
+
+                if(c>0){
+                    //write position of next linked record
+                    phys.putLong(offset, indexVals[i+1]);
+                }
+                if(c==12){
+                    //write total size in first record
+                    phys.putInt(offset+8, out.pos);
+                }
+            }
+            if(outPos!=out.pos) throw new InternalError();
+        }
+    }
+
+
+    @Override
+    public <A> A get(long recid, Serializer<A> serializer) {
+        final long ioRecid = IO_USER_START + recid*8;
+        Utils.readLock(locks, recid);
+        try{
+            return get2(ioRecid,serializer);
+        }catch(IOException e){
+            throw new IOError(e);
+        }finally{
+            Utils.readUnlock(locks, recid);
+        }
+    }
+
+    protected <A> A get2(long ioRecid,Serializer<A> serializer) throws IOException {
+        long indexVal = index.getLong(ioRecid);
+
+        int size = (int) ((indexVal&MASK_SIZE)>>>48);
+        DataInput2 di;
+        long offset = indexVal&MASK_OFFSET;
+        if((indexVal&MASK_IS_LINKED)==0){
+            //read single record
+            di = phys.getDataInput(offset, size);
+
+        }else{
+            //is linked, first construct buffer we will read data to
+            int totalSize = phys.getInt(offset+8);
+            int pos = 0;
+            int c = 12;
+            byte[] buf = new byte[totalSize];
+            //read parts into segment
+            for(;;){
+                DataInput2 in = phys.getDataInput(offset + c, size-c);
+                in.readFully(buf,pos,size-c);
+                pos+=size-c;
+                if(c==0) break;
+                //read next part
+                long next = phys.getLong(offset);
+                offset = next&MASK_OFFSET;
+                size = (int) ((next&MASK_SIZE)>>>48);
+                //is the next part last?
+                c =  ((next&MASK_IS_LINKED)==0)? 0 : 8;
+            }
+            if(pos!=totalSize) throw new InternalError();
+            di = new DataInput2(buf);
+            size = totalSize;
+        }
+        int start = di.pos;
+        A ret = serializer.deserialize(di,size);
+        if(size+start>di.pos)throw new InternalError("data were not fully read, check your serializier");
+        if(size+start<di.pos)throw new InternalError("data were read beyond record size, check your serializier");
+        return ret;
+    }
+
+
+    @Override
+    public <A> void update(long recid, A value, Serializer<A> serializer) {
+        DataOutput2 out = serialize(value, serializer);
+
+        final long ioRecid = IO_USER_START + recid*8;
+
+        Utils.writeLock(locks, recid);
+        try{
+            final long[] indexVals;
+            structuralLock.lock();
+            try{
+                indexVals = physAllocate(out.pos,true);
+            }finally {
+                structuralLock.unlock();
+            }
+
+            put2(out, ioRecid, indexVals);
+        }finally{
+            Utils.writeUnlock(locks, recid);
+        }
+    }
+
+    @Override
+    public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer) {
+        final long ioRecid = IO_USER_START + recid*8;
+        Utils.writeLock(locks, recid);
+        try{
+            /*
+             * deserialize old value
+             */
+
+            A oldVal = get2(ioRecid,serializer);
+
+            /*
+             * compare oldValue and expected
+             */
+            if((oldVal == null && expectedOldValue!=null) || (oldVal!=null && !oldVal.equals(expectedOldValue)))
+                return false;
+
+            /*
+             * write new value
+             */
+            DataOutput2 out = serialize(newValue, serializer);
+
+            final long[] indexVals;
+            structuralLock.lock();
+            try{
+                indexVals = physAllocate(out.pos,true);
+            }finally {
+                structuralLock.unlock();
+            }
+
+            put2(out, ioRecid, indexVals);
+            return true;
+        }catch(IOException e){
+            throw new IOError(e);
+        }finally{
+            Utils.writeUnlock(locks, recid);
+        }
+    }
+
+    /**
+     * Deletes the record at {@code recid}: zeroes its index slot, walks any linked
+     * chunk chain, and returns the recid and the physical space to the free lists.
+     */
+    @Override
+    public <A> void delete(long recid, Serializer<A> serializer) {
+        final long ioRecid = IO_USER_START + recid*8;
+        Utils.writeLock(locks, recid);
+        try{
+            //get index val and zero it out
+            final long indexVal = index.getLong(ioRecid);
+            index.putLong(ioRecid,0L);
+
+            long[] linkedRecords = null;
+            int linkedPos = 0;
+            if((indexVal&MASK_IS_LINKED)!=0){
+                //record is composed of multiple linked records, so collect all of them
+                linkedRecords = new long[2];
+
+                //traverse linked records
+                long linkedVal = phys.getLong(indexVal&MASK_OFFSET);
+                for(;;){
+                    if(linkedPos==linkedRecords.length) //grow if necessary
+                        linkedRecords = Arrays.copyOf(linkedRecords, linkedRecords.length*2);
+                    //store last linkedVal
+                    linkedRecords[linkedPos] = linkedVal;
+
+                    if((linkedVal&MASK_IS_LINKED)==0){
+                        break; //this is last linked record, so break
+                    }
+                    //move and read to next
+                    linkedPos++;
+                    linkedVal = phys.getLong(linkedVal&MASK_OFFSET);
+                }
+            }
+
+            //now lock everything and mark free space
+            structuralLock.lock();
+            try{
+                //free recid
+                freeIoRecidPut(ioRecid);
+                //free first record pointed from indexVal
+                freePhysPut(indexVal);
+
+                //if there are more linked records, free those as well
+                if(linkedRecords!=null){
+                    //NOTE(review): slots 0..linkedPos are populated above, but this loop
+                    //stops at linkedPos-1, so the final chunk appears never to be freed -
+                    //confirm against upstream MapDB whether this leaks space
+                    for(int i=0;i<linkedPos;i++){
+                        freePhysPut(linkedRecords[i]);
+                    }
+                }
+            }finally {
+                structuralLock.unlock();
+            }
+
+        }finally{
+            Utils.writeUnlock(locks, recid);
+        }
+    }
+
+    /**
+     * Reserves physical storage for {@code size} bytes and returns packed index values
+     * (size<<48 | offset), one per chunk. Sizes >= MAX_REC_SIZE are split into a linked
+     * chain; non-terminal chunks reserve {@code c} header bytes inside their allocation
+     * (12 for the first chunk: 8-byte next pointer + 4-byte total size; 8 for middle
+     * chunks: next pointer only), which is why {@code size -= allocSize - c}.
+     */
+    protected long[] physAllocate(int size, boolean ensureAvail) {
+        if(size==0L) return new long[]{0L};
+        //append to end of file
+        if(size<MAX_REC_SIZE){
+            long indexVal = freePhysTake(size,ensureAvail);
+            indexVal |= ((long)size)<<48;
+            return new long[]{indexVal};
+        }else{
+            long[] ret = new long[2];
+            int retPos = 0;
+            int c = 12;
+
+            while(size>0){
+                if(retPos == ret.length) ret = Arrays.copyOf(ret, ret.length*2);
+                int allocSize = Math.min(size, MAX_REC_SIZE);
+                size -= allocSize - c;
+
+                //append to end of file
+                long indexVal = freePhysTake(allocSize, ensureAvail);
+                indexVal |= (((long)allocSize)<<48);
+                if(c!=0) indexVal|=MASK_IS_LINKED;
+                ret[retPos++] = indexVal;
+
+                //next chunk header: 8 bytes (link only), or 0 if it will be the last chunk
+                c = size<=MAX_REC_SIZE ? 0 : 8;
+            }
+            if(size!=0) throw new InternalError();
+
+            return Arrays.copyOf(ret, retPos);
+        }
+    }
+
+
+
+    /** Rounds {@code offset} up to the next multiple of 16 (identity when already aligned). */
+    protected static long roundTo16(long offset){
+        long rem = offset%16;
+        if(rem!=0) offset +=16-rem;
+        return offset;
+    }
+
+
+    /**
+     * Header size (in bytes) of chunk {@code i} in a record of {@code size} chunks:
+     * 0 for a single or last chunk, 12 for the first chunk of a linked record
+     * (8-byte next pointer + 4-byte total length), 8 for middle chunks (next pointer only).
+     */
+    protected static int ccc(int size, int i) {
+        return (size==1|| i==size-1)? 0: (i==0?12:8);
+    }
+
+
+
+    /**
+     * Flushes the store sizes (when writable), syncs and closes both volumes, and
+     * optionally deletes the backing files.
+     * NOTE(review): the locks are acquired outside any try/finally - if a sync/close
+     * call throws, structuralLock and all record locks stay held; confirm acceptable.
+     */
+    @Override
+    public void close() {
+        structuralLock.lock();
+        Utils.writeLockAll(locks);
+        if(!readOnly){
+            //persist current sizes so the next open sees a consistent store
+            index.putLong(IO_PHYS_SIZE,physSize);
+            index.putLong(IO_INDEX_SIZE,indexSize);
+        }
+
+        index.sync();
+        phys.sync();
+        index.close();
+        phys.close();
+        if(deleteFilesAfterClose){
+            index.deleteFile();
+            phys.deleteFile();
+        }
+        //null volumes mark the store as closed (see isClosed())
+        index = null;
+        phys = null;
+        Utils.writeUnlockAll(locks);
+        structuralLock.unlock();
+    }
+
+    /** True once close() has run (index volume is nulled there). */
+    @Override
+    public boolean isClosed() {
+        return index==null;
+    }
+
+    /**
+     * Persists store sizes (when writable) and syncs both volumes. StoreDirect has no
+     * transaction log, so this is a flush rather than a transactional commit.
+     */
+    @Override
+    public void commit() {
+        if(!readOnly){
+            index.putLong(IO_PHYS_SIZE,physSize);
+            index.putLong(IO_INDEX_SIZE,indexSize);
+        }
+        index.sync();
+        phys.sync();
+
+    }
+
+    /** Not supported: without a journal there is nothing to roll back to. */
+    @Override
+    public void rollback() throws UnsupportedOperationException {
+        throw new UnsupportedOperationException("rollback not supported with journal disabled");
+    }
+
+    /** True when the store was opened read-only. */
+    @Override
+    public boolean isReadOnly() {
+        return readOnly;
+    }
+
+    /**
+     * Rewrites the store into fresh files (dropping unused space), then swaps them in
+     * via renames and reopens the volumes. Not supported for in-memory stores.
+     * NOTE(review): the size fields are written before the locks are taken (racy with
+     * concurrent writers), and store2.structuralLock/close are skipped if an exception
+     * is thrown mid-copy - confirm both are acceptable here.
+     */
+    @Override
+    public void compact() {
+
+        if(readOnly) throw new IllegalAccessError();
+        index.putLong(IO_PHYS_SIZE,physSize);
+        index.putLong(IO_INDEX_SIZE,indexSize);
+
+        if(index.getFile()==null) throw new UnsupportedOperationException("compact not supported for memory storage yet");
+        structuralLock.lock();
+        for(ReentrantReadWriteLock l:locks) l.writeLock().lock();
+        try{
+            //create secondary files for compaction
+            //TODO RAF
+            //TODO memory based stores
+            final File indexFile = index.getFile();
+            final File physFile = phys.getFile();
+            final boolean isRaf = index instanceof Volume.RandomAccessFileVol;
+            Volume.Factory fab = Volume.fileFactory(false, isRaf, new File(indexFile+".compact"));
+            StoreDirect store2 = new StoreDirect(fab);
+            store2.structuralLock.lock();
+
+            //transfer stack of free recids
+            for(long recid =longStackTake(IO_FREE_RECID);
+                recid!=0; recid=longStackTake(IO_FREE_RECID)){
+                store2.longStackPut(recid, IO_FREE_RECID);
+            }
+
+            //iterate over recids and transfer physical records
+            store2.index.putLong(IO_INDEX_SIZE, indexSize);
+
+            for(long ioRecid = IO_USER_START; ioRecid<indexSize;ioRecid+=8){
+                //copy raw bytes; NOTE(review): get2 may return null for empty slots,
+                //which would NPE at bb.length - confirm empty slots cannot occur here
+                byte[] bb = get2(ioRecid,Serializer.BYTE_ARRAY_SERIALIZER);
+                long[] indexVals = store2.physAllocate(bb.length,true);
+                DataOutput2 out = new DataOutput2();
+                out.buf = bb;
+                out.pos = bb.length;
+                store2.index.ensureAvailable(ioRecid+8);
+                store2.put2(out, ioRecid,indexVals);
+            }
+
+
+
+            File indexFile2 = store2.index.getFile();
+            File physFile2 = store2.phys.getFile();
+            store2.structuralLock.unlock();
+            store2.close();
+
+            //move old files aside, then move the compacted files into place
+            long time = System.currentTimeMillis();
+            File indexFile_ = new File(indexFile.getPath()+"_"+time+"_orig");
+            File physFile_ = new File(physFile.getPath()+"_"+time+"_orig");
+
+            index.close();
+            phys.close();
+            if(!indexFile.renameTo(indexFile_))throw new InternalError("could not rename file");
+            if(!physFile.renameTo(physFile_))throw new InternalError("could not rename file");
+
+            if(!indexFile2.renameTo(indexFile))throw new InternalError("could not rename file");
+            //TODO process may fail in middle of rename, analyze sequence and add recovery
+            if(!physFile2.renameTo(physFile))throw new InternalError("could not rename file");
+
+            indexFile_.delete();
+            physFile_.delete();
+
+            //reopen the compacted store under the original file names
+            Volume.Factory fac2 = Volume.fileFactory(false, isRaf, indexFile);
+            index = fac2.createIndexVolume();
+            phys = fac2.createPhysVolume();
+
+        }catch(IOException e){
+            throw new IOError(e);
+        }finally {
+            structuralLock.unlock();
+            for(ReentrantReadWriteLock l:locks) l.writeLock().unlock();
+        }
+
+    }
+
+
+    /**
+     * Pops one value from the long-stack whose head pointer lives at index slot
+     * {@code ioList}; returns 0 when the stack is empty. Caller must hold structuralLock.
+     * Page layout: byte[0] = record count, bytes[2..7] = six-byte pointer to the
+     * previous page, six-byte records starting at offset 8.
+     */
+    protected long longStackTake(final long ioList) {
+        if(!structuralLock.isLocked())throw new InternalError();
+        if(ioList<IO_FREE_RECID || ioList>=IO_USER_START) throw new IllegalArgumentException("wrong ioList: "+ioList);
+
+        final long dataOffset = index.getLong(ioList) &MASK_OFFSET;
+        if(dataOffset == 0)
+            return 0; //there is no such list, so just return 0
+
+
+        final int numberOfRecordsInPage = phys.getUnsignedByte(dataOffset);
+
+        if(numberOfRecordsInPage<=0)
+            throw new InternalError();
+        if(numberOfRecordsInPage> LONG_STACK_PER_PAGE)
+            throw new InternalError();
+
+        //topmost record on the page (record n lives at dataOffset + 2 + n*6)
+        final long ret = phys.getSixLong(dataOffset + 2 + numberOfRecordsInPage * 6);
+
+        //was it only record at that page?
+        if(numberOfRecordsInPage == 1){
+            //yes, delete this page
+            final long previousListPhysid =phys.getSixLong(dataOffset+2);
+            if(previousListPhysid !=0){
+                //update index so it points to previous page
+                index.putLong(ioList , previousListPhysid | (((long) LONG_STACK_PAGE_SIZE) << 48));
+            }else{
+                //zero out index
+                index.putLong(ioList , 0L);
+            }
+            //put space used by this page into free list
+            freePhysPut(dataOffset | (((long)LONG_STACK_PAGE_SIZE)<<48));
+        }else{
+            //no, it was not last record at this page, so just decrement the counter
+            phys.putUnsignedByte(dataOffset, (byte) (numberOfRecordsInPage - 1));
+        }
+
+        //System.out.println("longStackTake: "+ioList+" - "+ret);
+
+        return ret;
+
+    }
+
+
+    /**
+     * Pushes {@code offset} onto the long-stack headed at index slot {@code ioList},
+     * allocating a new page when the list is empty or the head page is full.
+     * Caller must hold structuralLock.
+     */
+    protected void longStackPut(final long ioList, long offset){
+        offset = offset & MASK_OFFSET;
+        if(!structuralLock.isLocked())throw new InternalError();
+        if(ioList<IO_FREE_RECID || ioList>=IO_USER_START) throw new InternalError("wrong ioList: "+ioList);
+
+        //System.out.println("longStackPut: "+ioList+" - "+offset);
+
+        //index position was cleared, put into free index list
+        final long listPhysid2 = index.getLong(ioList) &MASK_OFFSET;
+
+        if(listPhysid2 == 0){ //empty list?
+            //yes empty, create new page and fill it with values
+            final long listPhysid = freePhysTake(LONG_STACK_PAGE_SIZE,true) &MASK_OFFSET;
+            if(listPhysid == 0) throw new InternalError();
+            //set previous Free Index List page to zero as this is first page
+            phys.putSixLong(listPhysid + 2, 0L);
+            //set number of free records in this page to 1
+            phys.putUnsignedByte(listPhysid, (byte) 1);
+            //set  record
+            phys.putSixLong(listPhysid + 8, offset);
+            //and update index file with new page location
+            index.putLong(ioList , (((long) LONG_STACK_PAGE_SIZE) << 48) | listPhysid);
+        }else{
+            final int numberOfRecordsInPage = phys.getUnsignedByte(listPhysid2);
+            if(numberOfRecordsInPage == LONG_STACK_PER_PAGE){ //is current page full?
+                //yes it is full, so we need to allocate new page and write our number there
+
+                final long listPhysid = freePhysTake(LONG_STACK_PAGE_SIZE,true) &MASK_OFFSET;
+                if(listPhysid == 0) throw new InternalError();
+                //final ByteBuffers dataBuf = dataBufs[((int) (listPhysid / BUF_SIZE))];
+                //set location to previous page
+                phys.putSixLong(listPhysid + 2, listPhysid2);
+                //set number of free records in this page to 1
+                phys.putUnsignedByte(listPhysid, (byte) 1);
+                //set free record
+                phys.putSixLong(listPhysid + 8, offset);
+                //and update index file with new page location
+                index.putLong(ioList , (((long) LONG_STACK_PAGE_SIZE) << 48) | listPhysid);
+            }else{
+                //there is space on page, so just write released recid and increase the counter
+                phys.putSixLong(listPhysid2 + 8 + 6 * numberOfRecordsInPage, offset);
+                phys.putUnsignedByte(listPhysid2, (byte) (numberOfRecordsInPage + 1));
+            }
+        }
+    }
+
+
+
+    /** Returns an index slot (ioRecid) to the free-recid stack for later reuse. */
+    protected void freeIoRecidPut(long ioRecid) {
+        longStackPut(IO_FREE_RECID, ioRecid);
+    }
+
+    /**
+     * Returns a free index slot, reusing one from the free-recid stack when possible,
+     * otherwise growing the index by 8 bytes and returning the new last slot.
+     */
+    protected long freeIoRecidTake(boolean ensureAvail){
+        long ioRecid = longStackTake(IO_FREE_RECID);
+        if(ioRecid!=0) return ioRecid;
+        indexSize+=8;
+        if(ensureAvail)
+            index.ensureAvailable(indexSize);
+        return indexSize-8;
+    }
+
+    /** Maps an allocation size to the index slot of its free-space list (one list per 16-byte size bucket). */
+    protected static final long size2ListIoRecid(long size){
+        return IO_FREE_RECID + 8 + ((size-1)/16)*8;
+    }
+    /** Returns a physical chunk (packed size<<48 | offset) to the free list for its size bucket. */
+    protected void freePhysPut(long indexVal) {
+        long size = (indexVal&MASK_SIZE) >>>48;
+        longStackPut(size2ListIoRecid(size), indexVal & MASK_OFFSET);
+    }
+
+    /**
+     * Returns the offset of a free physical chunk of {@code size} bytes, reusing the
+     * matching free list when possible, otherwise appending to the end of the file
+     * (16-byte aligned). NOTE(review): when the allocation would straddle a
+     * Volume.BUF_SIZE boundary the file is padded to the boundary and the gap is not
+     * added to any free list - it appears unreclaimable until compact(); confirm intended.
+     */
+    protected long freePhysTake(int size, boolean ensureAvail) {
+        if(size==0)throw new IllegalArgumentException();
+
+        //check free space
+        long ret =  longStackTake(size2ListIoRecid(size));
+        if(ret!=0) return ret;
+        //not available, increase file size
+        if(physSize%Volume.BUF_SIZE+size>Volume.BUF_SIZE)
+            physSize += Volume.BUF_SIZE - physSize%Volume.BUF_SIZE;
+        long physSize2 = physSize;
+        physSize = roundTo16(physSize+size);
+        if(ensureAvail)
+            phys.ensureAvailable(physSize);
+        return physSize2;
+    }
+
+
+    /** Serializes {@code value} into a fresh DataOutput2, wrapping IOException in IOError. */
+    protected <A> DataOutput2 serialize(A value, Serializer<A> serializer) {
+        try {
+            DataOutput2 out = new DataOutput2();
+            serializer.serialize(out,value);
+            return out;
+        } catch (IOException e) {
+            throw new IOError(e);
+        }
+    }
+
+}
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/StoreWAL.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/StoreWAL.java	(revision 29484)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/StoreWAL.java	(revision 29484)
@@ -0,0 +1,480 @@
+package org.mapdb;
+
+import java.io.IOError;
+import java.io.IOException;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Write-Ahead-Log
+ */
+public class StoreWAL extends StoreDirect {
+
+    // instruction opcodes written into the transaction log; replayLogFile() dispatches on these
+    protected static final byte WAL_INDEX_LONG = 101;
+    protected static final byte WAL_PHYS_LONG = 102;
+    protected static final byte WAL_PHYS_SIX_LONG = 103;
+    protected static final byte WAL_PHYS_ARRAY = 104;
+    protected static final byte WAL_SKIP_REST_OF_BLOCK = 105;
+
+    /** last instruction in log file */
+    protected static final byte WAL_SEAL = 111;
+    /** added to offset 8 into log file, indicates that log was synced and closed*/
+    protected static final long LOG_SEAL = 4566556446554645L;
+
+    public static final String TRANS_LOG_FILE_EXT = ".t";
+
+    // sentinel stored in 'modified' for records deleted in the current transaction
+    protected static final long[] TOMBSTONE = new long[1];
+
+    protected final Volume.Factory volFac;
+    // open transaction log volume; null when there is no uncommitted transaction
+    protected Volume log;
+
+    // number of bytes used in the log so far; doubles as the append cursor
+    protected long logSize;
+
+    // maps ioRecid -> log positions of record data written in the current
+    // (uncommitted) transaction; reads consult this before the main store
+    protected final LongConcurrentHashMap<long[]> modified = new LongConcurrentHashMap<long[]>();
+
+
+    public StoreWAL(Volume.Factory volFac) {
+        this(volFac,false,false);
+    }
+    public StoreWAL(Volume.Factory volFac, boolean readOnly, boolean deleteFilesAfterClose) {
+        super(volFac, readOnly, deleteFilesAfterClose);
+        this.volFac = volFac;
+        this.log = volFac.createTransLogVolume();
+        reloadIndexFile();
+        // apply (or discard) any log left over from a previous session
+        replayLogFile();
+        log = null;
+    }
+
+    /** Resets in-memory transaction state and re-reads store sizes from the index file. */
+    protected void reloadIndexFile() {
+        logSize = 0;
+        modified.clear();
+        indexSize = index.getLong(IO_INDEX_SIZE);
+        physSize = index.getLong(IO_PHYS_SIZE);
+    }
+
+    /** Lazily creates the log volume and writes its 16-byte header (seal slot left zero). */
+    protected void openLogIfNeeded(){
+        if(log !=null) return;
+        log = volFac.createTransLogVolume();
+        log.ensureAvailable(16);
+        log.putLong(0, HEADER);
+        log.putLong(8, 0L);
+        logSize = 16;
+    }
+
+
+
+
+    /**
+     * Inserts a new record. Physical space is reserved, but the data itself is only
+     * written to the transaction log; it reaches the main store on commit.
+     */
+    @Override
+    public <A> long put(A value, Serializer<A> serializer) {
+        DataOutput2 out = serialize(value, serializer);
+
+        final long ioRecid;
+        final long[] physPos;
+        final long[] logPos;
+
+        structuralLock.lock();
+        try{
+            openLogIfNeeded();
+            ioRecid = freeIoRecidTake(false);
+            //first get space in phys
+            physPos = physAllocate(out.pos,false);
+            //now get space in log
+            logPos = logAllocate(physPos);
+
+        }finally{
+            structuralLock.unlock();
+        }
+
+        //write data into log; logPos[0] points at the first chunk payload, so step back
+        //over the 1+8 chunk header and the 1+8+8 index-val entry reserved by logAllocate
+        walIndexVal((logPos[0]&MASK_OFFSET) - 1-8-8-1-8, ioRecid, physPos[0]);
+        walPhysArray(out, physPos, logPos);
+
+        modified.put(ioRecid,logPos);
+        return (ioRecid-IO_USER_START)/8;
+    }
+
+    /**
+     * Writes the serialized record into the log, one WAL_PHYS_ARRAY entry per chunk.
+     * Each entry carries the target physical index value followed by the chunk payload
+     * (including the next-pointer / total-size header for linked chunks).
+     */
+    protected void walPhysArray(DataOutput2 out, long[] physPos, long[] logPos) {
+        //write byte[] data
+        int outPos = 0;
+
+        for(int i=0;i<logPos.length;i++){
+            //c = per-chunk header size: 12 first of linked, 8 middle, 0 last/single
+            int c = ccc(logPos.length, i);
+            long pos = logPos[i]&MASK_OFFSET;
+            int size = (int) ((logPos[i]&MASK_SIZE) >>>48);
+
+            log.putByte(pos -  8 - 1, WAL_PHYS_ARRAY);
+            log.putLong(pos -  8, physPos[i]);
+
+            if(c>0){
+                //linked chunk: first 8 header bytes point to the next chunk
+                log.putLong(pos, physPos[i + 1]);
+                pos+=8;
+            }
+            if(c==12){
+                //first chunk of a linked record also stores the total record size
+                log.putInt(pos, out.pos);
+                pos+=4;
+            }
+            log.putData(pos, out.buf, outPos, size - c);
+            outPos +=size-c;
+        }
+        if(outPos!=out.pos)throw new InternalError();
+    }
+
+
+    /** Appends a WAL_INDEX_LONG entry: on replay, {@code indexVal} is stored at index slot {@code ioRecid}. */
+    protected void walIndexVal(long logPos, long ioRecid, long indexVal) {
+
+        log.putByte(logPos, WAL_INDEX_LONG);
+        log.putLong(logPos + 1, ioRecid);
+        log.putLong(logPos + 9, indexVal);
+
+    }
+
+
+    /**
+     * Reserves log space mirroring a physical allocation: one 1+8+8 index-val entry,
+     * then per chunk a 1+8 header plus the chunk payload. Returns packed
+     * (size<<48 | payloadOffset) values. Caller must hold structuralLock.
+     */
+    protected long[] logAllocate(long[] physPos) {
+
+        openLogIfNeeded();
+        logSize+=1+8+8; //space used for index val
+
+        long[] ret = new long[physPos.length];
+        for(int i=0;i<physPos.length;i++){
+            long size = (physPos[i]&MASK_SIZE)>>>48;
+            //would overlaps Volume Block?
+            checkLogRounding();
+            logSize+=1+8; //space used for WAL_PHYS_ARRAY
+            ret[i] = (size<<48) | logSize;
+
+            logSize+=size;
+
+        }
+
+        log.ensureAvailable(logSize);
+        return ret;
+    }
+
+    /**
+     * If the next (worst-case) entry could straddle a Volume block boundary, writes a
+     * WAL_SKIP_REST_OF_BLOCK marker and advances logSize to the next block start.
+     */
+    protected void checkLogRounding() {
+        if(logSize% Volume.BUF_SIZE+MAX_REC_SIZE*2>Volume.BUF_SIZE){
+            log.ensureAvailable(logSize+1);
+            log.putByte(logSize, WAL_SKIP_REST_OF_BLOCK);
+            logSize += Volume.BUF_SIZE - logSize%Volume.BUF_SIZE;
+        }
+    }
+
+
+    /** Reads a record under the striped read lock; see get2 for transaction-local lookup. */
+    @Override
+    public <A> A get(long recid, Serializer<A> serializer) {
+        final long ioRecid = IO_USER_START + recid*8;
+        Utils.readLock(locks,recid);
+        try{
+            return get2(ioRecid, serializer);
+        }catch(IOException e){
+            throw new IOError(e);
+        }finally{
+            Utils.readUnlock(locks,recid);
+        }
+    }
+
+    /**
+     * Reads a record, preferring the uncommitted version in the transaction log
+     * (tracked in {@code modified}) over the committed version in the main store.
+     */
+    @Override
+    protected <A> A get2(long ioRecid, Serializer<A> serializer) throws IOException {
+        //check if record was modified in current transaction
+        long[] r = modified.get(ioRecid);
+        //not modified here, read the committed version
+        if(r==null) return super.get2(ioRecid, serializer);
+        //check for tombstone (was deleted in current trans)
+        if(r==TOMBSTONE || r.length==0) return null;
+
+        //was modified in current transaction, so read it from trans log
+        if(r.length==1){
+            //single record
+            final int size = (int) ((r[0]&MASK_SIZE)>>>48);
+            DataInput2 in = log.getDataInput(r[0]&MASK_OFFSET, size);
+            return serializer.deserialize(in, size);
+        }else{
+            //linked record: strip per-chunk headers and reassemble the payload
+            int totalSize = 0;
+            for(int i=0;i<r.length;i++){
+                int c = ccc(r.length, i);
+                totalSize+=  (int) ((r[i]&MASK_SIZE)>>>48)-c;
+            }
+            byte[] b = new byte[totalSize];
+            int pos = 0;
+            for(int i=0;i<r.length;i++){
+                int c = ccc(r.length, i);
+                int size = (int) ((r[i]&MASK_SIZE)>>>48) -c;
+                log.getDataInput((r[i] & MASK_OFFSET) + c, size).readFully(b,pos,size);
+                pos+=size;
+            }
+            if(pos!=totalSize)throw new InternalError();
+
+            return serializer.deserialize(new DataInput2(b),totalSize);
+        }
+    }
+
+    /** Updates a record; data goes into the log, physical space is pre-reserved for commit. */
+    @Override
+    public <A> void update(long recid, A value, Serializer<A> serializer) {
+        DataOutput2 out = serialize(value, serializer);
+        final long ioRecid = IO_USER_START + recid*8;
+        Utils.writeLock(locks,recid);
+        try{
+            final long[] physPos;
+            final long[] logPos;
+            structuralLock.lock();
+            try{
+                openLogIfNeeded();
+                //first get space in phys
+                physPos = physAllocate(out.pos,false);
+                //now get space in log
+                logPos = logAllocate(physPos);
+
+            }finally{
+                structuralLock.unlock();
+            }
+
+            //write data into log
+            walIndexVal((logPos[0]&MASK_OFFSET) - 1-8-8-1-8, ioRecid, physPos[0]);
+            walPhysArray(out, physPos, logPos);
+
+            modified.put(ioRecid,logPos);
+        }finally{
+            Utils.writeUnlock(locks,recid);
+        }
+    }
+
+    /**
+     * Atomic compare-and-swap against the transaction-local view of the record
+     * (null-safe compare). Returns true when the new value was written.
+     */
+    @Override
+    public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer) {
+        final long ioRecid = IO_USER_START + recid*8;
+        Utils.writeLock(locks,recid);
+        try{
+
+            A oldVal = get2(ioRecid,serializer);
+            if((oldVal == null && expectedOldValue!=null) || (oldVal!=null && !oldVal.equals(expectedOldValue)))
+                return false;
+
+            DataOutput2 out = serialize(newValue, serializer);
+
+            final long[] physPos;
+            final long[] logPos;
+            structuralLock.lock();
+            try{
+                openLogIfNeeded();
+                //first get space in phys
+                physPos = physAllocate(out.pos,false);
+                //now get space in log
+                logPos = logAllocate(physPos);
+
+            }finally{
+                structuralLock.unlock();
+            }
+
+            //write data into log
+            walIndexVal((logPos[0]&MASK_OFFSET) - 1-8-8-1-8, ioRecid, physPos[0]);
+            walPhysArray(out, physPos, logPos);
+
+            modified.put(ioRecid,logPos);
+            return true;
+        }catch(IOException e){
+            throw new IOError(e);
+        }finally{
+            Utils.writeUnlock(locks,recid);
+        }
+
+    }
+
+    /** Deletes a record: logs a zero index value and marks the recid with TOMBSTONE. */
+    @Override
+    public <A> void delete(long recid, Serializer<A> serializer) {
+        final long ioRecid = IO_USER_START + recid*8;
+        Utils.writeLock(locks,recid);
+        try{
+            structuralLock.lock();
+            final long logPos;
+            try{
+                openLogIfNeeded();
+                //NOTE(review): logPos is captured before checkLogRounding(), which may
+                //advance logSize past a block boundary; the entry below would then be
+                //written over the skip marker - confirm ordering against upstream MapDB
+                logPos = logSize;
+                checkLogRounding();
+                logSize+=1+8+8; //space used for index val
+                log.ensureAvailable(logSize);
+
+            }finally {
+                structuralLock.unlock();
+            }
+            walIndexVal(logPos,ioRecid,0);
+            modified.put(ioRecid,TOMBSTONE);
+        }finally {
+            Utils.writeUnlock(locks,recid);
+        }
+        }
+
+    /**
+     * Commits the transaction: appends the final store sizes, seals and syncs the log,
+     * then replays it into the main store and clears transaction state.
+     */
+    @Override
+    public void commit() {
+        structuralLock.lock();
+        Utils.writeLockAll(locks);
+        try{
+            if(log==null) return; //no modifications
+            //update physical and logical filesize
+
+            log.ensureAvailable(logSize + 17 + 17 + 1);
+            walIndexVal(logSize,IO_PHYS_SIZE, physSize);
+            logSize+=17;
+            walIndexVal(logSize,IO_INDEX_SIZE, indexSize);
+            logSize+=17;
+            //seal log file
+            log.putByte(logSize, WAL_SEAL);
+            logSize+=1;
+            //flush log file
+            log.sync();
+            //and write mark it was sealed; only after this point is the log durable
+            log.putLong(8, LOG_SEAL);
+            log.sync();
+
+            replayLogFile();
+            reloadIndexFile();
+
+        }finally {
+            Utils.writeUnlockAll(locks);
+            structuralLock.unlock();
+        }
+    }
+
+    /**
+     * Applies a sealed log to the index/phys volumes, then deletes it. An unsealed or
+     * corrupt log (crash before commit finished) is discarded without being applied.
+     */
+    protected void replayLogFile(){
+
+        logSize = 0;
+
+        //NOTE(review): this null check suggests log may be null here, yet the header
+        //read below would then NPE - confirm all callers pass a non-null log
+        if(log !=null){
+            log.sync();
+        }
+
+
+        //read headers
+        if(log.isEmpty() || log.getLong(0)!=HEADER || log.getLong(8) !=LOG_SEAL){
+            //wrong headers, discard log
+            log.close();
+            log.deleteFile();
+            log = null;
+            return;
+        }
+
+
+        //all good, start replay
+        logSize=16;
+        byte ins = log.getByte(logSize);
+        logSize+=1;
+
+        while(ins!=WAL_SEAL){
+            if(ins == WAL_INDEX_LONG){
+                long ioRecid = log.getLong(logSize);
+                logSize+=8;
+                long indexVal = log.getLong(logSize);
+                logSize+=8;
+                index.ensureAvailable(ioRecid+8);
+                index.putLong(ioRecid, indexVal);
+            }else if(ins == WAL_PHYS_LONG){
+                long offset = log.getLong(logSize);
+                logSize+=8;
+                long val = log.getLong(logSize);
+                logSize+=8;
+                phys.ensureAvailable(offset+8);
+                phys.putLong(offset,val);
+            }else if(ins == WAL_PHYS_SIX_LONG){
+                long offset = log.getLong(logSize);
+                logSize+=8;
+                long val = log.getSixLong(logSize);
+                logSize+=6;
+                phys.ensureAvailable(offset+6);
+                phys.putSixLong(offset, val);
+            }else if(ins == WAL_PHYS_ARRAY){
+                //offset field packs size<<48 | physOffset, same as an index value
+                long offset = log.getLong(logSize);
+                logSize+=8;
+                final int size = (int) ((offset&MASK_SIZE)>>>48);
+                offset = offset&MASK_OFFSET;
+
+                //transfer byte[] directly from log file without copying into memory
+                DataInput2 input = log.getDataInput(logSize, size);
+                synchronized (input.buf){
+                    input.buf.position(input.pos);
+                    input.buf.limit(input.pos+size);
+                    phys.ensureAvailable(offset+size);
+                    phys.putData(offset, input.buf);
+                    input.buf.clear();
+                }
+                logSize+=size;
+            }else if(ins == WAL_SKIP_REST_OF_BLOCK){
+                logSize += Volume.BUF_SIZE-logSize%Volume.BUF_SIZE;
+            }else{
+                throw new InternalError("unknown trans log instruction: "+ins +" at log offset: "+(logSize-1));
+            }
+
+            ins = log.getByte(logSize);
+            logSize+=1;
+        }
+        logSize=0;
+
+        //flush dbs
+        phys.sync();
+        index.sync();
+        //and discard log
+        log.putLong(0, 0);
+        log.putLong(8, 0); //destroy seal to prevent log file from being replayed
+        log.close();
+        log.deleteFile();
+        log = null;
+    }
+
+
+
+    /** Discards the transaction log and all uncommitted changes. */
+    @Override
+    public void rollback() throws UnsupportedOperationException {
+        structuralLock.lock();
+        Utils.writeLockAll(locks);
+        try{
+            //discard trans log
+            if(log !=null){
+                log.close();
+                log.deleteFile();
+                log = null;
+            }
+
+            reloadIndexFile();
+        }finally {
+            Utils.writeUnlockAll(locks);
+            structuralLock.unlock();
+        }
+    }
+
+    //free-space long stacks are disabled under WAL: take always reports "empty",
+    //so freed space is not reused within a transaction (see TODO below)
+    @Override
+    protected long longStackTake(long ioList) {
+        return 0;
+    }
+
+    @Override
+    protected void longStackPut(long ioList, long offset) {
+        //TODO long stack in WAL
+    }
+
+    /** Closes the log (if open) and both store volumes; optionally deletes the files. */
+    @Override
+    public void close() {
+        structuralLock.lock();
+        Utils.writeLockAll(locks);
+        try{
+            if(log !=null){
+                log.sync();
+                log.close();
+                if(deleteFilesAfterClose){
+                    log.deleteFile();
+                }
+            }
+
+            index.sync();
+            phys.sync();
+
+            index.close();
+            phys.close();
+            if(deleteFilesAfterClose){
+                index.deleteFile();
+                phys.deleteFile();
+            }
+            index = null;
+            phys = null;
+        }finally {
+            Utils.writeUnlockAll(locks);
+            structuralLock.unlock();
+        }
+    }
+}
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/Utils.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/Utils.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/Utils.java	(revision 29484)
@@ -21,4 +21,7 @@
 import java.util.*;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.LockSupport;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.logging.Logger;
 
@@ -188,6 +191,6 @@
             File index = File.createTempFile("mapdb","db");
             index.deleteOnExit();
-            new File(index.getPath()+StorageDirect.DATA_FILE_EXT).deleteOnExit();
-            new File(index.getPath()+ StorageJournaled.TRANS_LOG_FILE_EXT).deleteOnExit();
+            new File(index.getPath()+ StoreDirect.DATA_FILE_EXT).deleteOnExit();
+            new File(index.getPath()+ StoreWAL.TRANS_LOG_FILE_EXT).deleteOnExit();
 
             return index;
@@ -233,26 +236,28 @@
     }
 
-    public static void printer(final AtomicLong value){
-        new Thread("printer"){
+    public static void printProgress(final AtomicLong value){
+        new Thread("printProgress"){
             {
                 setDaemon(true);
             }
-
 
             @Override
             public void run() {
                 long startValue = value.get();
-                long startTime = System.currentTimeMillis();
+                long startTime, time = System.currentTimeMillis();
+                startTime = time;
                 long old = value.get();
                 while(true){
-
-                    try {
-                        Thread.sleep(1000);
-                    } catch (InterruptedException e) {
+                    time+=1000;
+                    while(time>System.currentTimeMillis()){
+                        LockSupport.parkNanos(1000*1000); //1ms
+                    }
+
+                    long current = value.get();
+                    if(current<0){
+                        System.out.println("Finished, total time: "+(time-startTime)+", aprox items: "+old);
                         return;
                     }
-
-                    long current = value.get();
-                    long totalSpeed = 1000*(current-startValue)/(System.currentTimeMillis()-startTime);
+                    long totalSpeed = 1000*(current-startValue)/(time-startTime);
                     System.out.print("total: "+current+" - items per last second: "+(current-old)+" - avg items per second: "+totalSpeed+"\r");
                     old = current;
@@ -263,3 +268,109 @@
     }
 
+    public static <A> DataOutput2 serializer(Serializer<A> serializer, A value) {
+        try{
+            DataOutput2 out = new DataOutput2();
+            serializer.serialize(out,value);
+            return out;
+        }catch(IOException e){
+            throw new IOError(e);
+        }
+
+    }
+
+    public static String randomString(int size) {
+        String chars = "0123456789abcdefghijklmnopqrstuvwxyz !@#$%^&*()_+=-{}[]:\",./<>?|\\";
+        StringBuilder b = new StringBuilder(size);
+        for(int i=0;i<size;i++){
+            b.append(chars.charAt(RANDOM.nextInt(chars.length())));
+        }
+        return b.toString();
+    }
+
+    public static ReentrantReadWriteLock[] newReadWriteLocks(int size) {
+        ReentrantReadWriteLock[] locks = new ReentrantReadWriteLock[size];
+        for(int i=0;i<locks.length;i++) locks[i] = new ReentrantReadWriteLock();
+        return locks;
+    }
+
+    public static ReentrantLock[] newLocks(int size) {
+        ReentrantLock[] locks = new ReentrantLock[size];
+        for(int i=0;i<locks.length;i++) locks[i] = new ReentrantLock();
+        return locks;
+    }
+
+    public static void lock(ReentrantLock[] locks, long recid) {
+        locks[Utils.longHash(recid)%locks.length].lock();
+    }
+
+    public static void lockAll(ReentrantLock[] locks) {
+        for(ReentrantLock lock:locks)lock.lock();
+    }
+
+    public static void unlockAll(ReentrantLock[] locks) {
+        for(ReentrantLock lock:locks)lock.unlock();
+    }
+
+
+    public static void unlock(ReentrantLock[] locks, long recid) {
+        locks[Utils.longHash(recid)%locks.length].unlock();
+    }
+
+
+    public static void readLock(ReentrantReadWriteLock[] locks, long recid) {
+        locks[Utils.longHash(recid)%locks.length].readLock().lock();
+    }
+
+    public static void readUnlock(ReentrantReadWriteLock[] locks, long recid) {
+        locks[Utils.longHash(recid)%locks.length].readLock().unlock();
+    }
+
+    public static void writeLock(ReentrantReadWriteLock[] locks, long recid) {
+        locks[Utils.longHash(recid)%locks.length].writeLock().lock();
+    }
+
+    public static void writeUnlock(ReentrantReadWriteLock[] locks, long recid) {
+        locks[Utils.longHash(recid)%locks.length].writeLock().unlock();
+    }
+
+    public static void writeLockAll(ReentrantReadWriteLock[] locks) {
+        for(ReentrantReadWriteLock l:locks) l.writeLock().lock();
+    }
+
+    public static void writeUnlockAll(ReentrantReadWriteLock[] locks) {
+        for(ReentrantReadWriteLock l:locks) l.writeLock().unlock();
+    }
+
+
+    public static void lock(LongConcurrentHashMap<Thread> locks, long recid){
+        //feel free to rewrite, if you know better (more efficient) way
+        if(locks.get(recid)==Thread.currentThread()){
+            //check node is not already locked by this thread
+            throw new InternalError("node already locked by current thread: "+recid);
+        }
+
+        while(locks.putIfAbsent(recid, Thread.currentThread()) != null){
+            LockSupport.parkNanos(10);
+        }
+    }
+
+
+
+    public static void unlock(LongConcurrentHashMap<Thread> locks,final long recid) {
+        final Thread t = locks.remove(recid);
+        if(t!=Thread.currentThread())
+            throw new InternalError("unlocked wrong thread");
+
+    }
+
+    public static void assertNoLocks(LongConcurrentHashMap<Thread> locks){
+        if(CC.PARANOID){
+            LongMap.LongMapIterator<Thread> i = locks.longMapIterator();
+            while(i.moveToNext()){
+                if(i.value()==Thread.currentThread()){
+                    throw new InternalError("Node "+i.key()+" is still locked");
+                }
+            }
+        }
+    }
 }
Index: /applications/editors/josm/plugins/imagerycache/src/org/mapdb/Volume.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/mapdb/Volume.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/mapdb/Volume.java	(revision 29484)
@@ -24,8 +24,8 @@
 import java.nio.ByteBuffer;
 import java.nio.MappedByteBuffer;
-import java.nio.channels.AsynchronousFileChannel;
 import java.nio.channels.FileChannel;
-import java.nio.file.StandardOpenOption;
 import java.util.Arrays;
+import java.util.Map;
+import java.util.WeakHashMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
@@ -34,5 +34,10 @@
 
 /**
- * MapDB abstraction over raw storage (file, disk partition, memory etc...)
+ * MapDB abstraction over raw storage (file, disk partition, memory etc...).
+ * <p/>
+ * Implementations need to be thread safe (especially the
+ * 'ensureAvailable' operation).
+ * However updates do not have to be atomic; it is the client's responsibility
+ * to ensure two threads are not writing/reading into the same location.
  *
  * @author Jan Kotek
@@ -41,5 +46,4 @@
 
     public static final int BUF_SIZE = 1<<30;
-    public static final int INITIAL_SIZE = 1024*32;
 
     abstract public void ensureAvailable(final long offset);
@@ -49,5 +53,5 @@
     abstract public void putByte(final long offset, final byte value);
 
-    abstract public void putData(final long offset, final byte[] value, int size);
+    abstract public void putData(final long offset, final byte[] src, int srcPos, int srcSize);
     abstract public void putData(final long offset, final ByteBuffer buf);
 
@@ -88,4 +92,35 @@
         putByte(offset, (byte)(b & 0xff));
     }
+
+    /**
+     * Reads a long from the indicated position
+     */
+    public final long getSixLong(long pos) {
+        return
+                ((long) (getByte(pos + 0) & 0xff) << 40) |
+                        ((long) (getByte(pos + 1) & 0xff) << 32) |
+                        ((long) (getByte(pos + 2) & 0xff) << 24) |
+                        ((long) (getByte(pos + 3) & 0xff) << 16) |
+                        ((long) (getByte(pos + 4) & 0xff) << 8) |
+                        ((long) (getByte(pos + 5) & 0xff) << 0);
+    }
+
+    /**
+     * Writes a long to the indicated position
+     */
+    public final void putSixLong(long pos, long value) {
+        if(value<0) throw new IllegalArgumentException();
+    	if(value >> (6*8)!=0)
+    		throw new IllegalArgumentException("does not fit");
+        //TODO read/write as integer+short, might be faster
+        putByte(pos + 0, (byte) (0xff & (value >> 40)));
+        putByte(pos + 1, (byte) (0xff & (value >> 32)));
+        putByte(pos + 2, (byte) (0xff & (value >> 24)));
+        putByte(pos + 3, (byte) (0xff & (value >> 16)));
+        putByte(pos + 4, (byte) (0xff & (value >> 8)));
+        putByte(pos + 5, (byte) (0xff & (value >> 0)));
+
+    }
+
 
     /** returns underlying file if it exists */
@@ -111,6 +146,6 @@
     public static Factory fileFactory(final boolean readOnly, final boolean RAF, final File indexFile){
         return fileFactory(readOnly, RAF, indexFile,
-                new File(indexFile.getPath() + StorageDirect.DATA_FILE_EXT),
-                new File(indexFile.getPath() + StorageJournaled.TRANS_LOG_FILE_EXT));
+                new File(indexFile.getPath() + StoreDirect.DATA_FILE_EXT),
+                new File(indexFile.getPath() + StoreWAL.TRANS_LOG_FILE_EXT));
     }
 
@@ -142,13 +177,13 @@
         return new Factory() {
 
-            @Override public Volume createIndexVolume() {
+            @Override public synchronized  Volume createIndexVolume() {
                 return new MemoryVol(useDirectBuffer);
             }
 
-            @Override public Volume createPhysVolume() {
+            @Override public synchronized Volume createPhysVolume() {
                 return new MemoryVol(useDirectBuffer);
             }
 
-            @Override public Volume createTransLogVolume() {
+            @Override public synchronized Volume createTransLogVolume() {
                 return new MemoryVol(useDirectBuffer);
             }
@@ -181,6 +216,7 @@
             //check for most common case, this is already mapped
             if(buffersPos<buffers.length && buffers[buffersPos]!=null &&
-                    buffers[buffersPos].capacity()>=offset% BUF_SIZE)
+                    buffers[buffersPos].capacity()>=offset% BUF_SIZE){
                 return;
+            }
 
             growLock.lock();
@@ -191,16 +227,31 @@
                     return;
 
+                ByteBuffer[] buffers2 = buffers;
 
                 //grow array if necessary
-                if(buffersPos>=buffers.length){
-                    buffers = Arrays.copyOf(buffers, Math.max(buffersPos, buffers.length * 2));
-                }
+                if(buffersPos>=buffers2.length){
+                    buffers2 = Arrays.copyOf(buffers2, Math.max(buffersPos+1, buffers2.length * 2));
+                }
+
 
                 //just remap file buffer
-                ByteBuffer newBuf = makeNewBuffer(offset);
+                if( buffers2[buffersPos] == null){
+                    //make sure previous buffer is fully expanded
+                    if(buffersPos>0){
+                        ByteBuffer oldPrev = buffers2[buffersPos-1];
+                        if(oldPrev == null || oldPrev.capacity()!=BUF_SIZE){
+                            buffers2[buffersPos-1]  = makeNewBuffer(1L*buffersPos*BUF_SIZE-1,buffers2);
+                        }
+                    }
+                }
+
+
+                ByteBuffer newBuf = makeNewBuffer(offset, buffers2);
                 if(readOnly)
                     newBuf = newBuf.asReadOnlyBuffer();
 
-                buffers[buffersPos] = newBuf;
+                buffers2[buffersPos] = newBuf;
+
+                buffers = buffers2;
             }finally{
                 growLock.unlock();
@@ -208,5 +259,5 @@
         }
 
-        protected abstract ByteBuffer makeNewBuffer(long offset);
+        protected abstract ByteBuffer makeNewBuffer(long offset, ByteBuffer[] buffers2);
 
         protected final ByteBuffer internalByteBuffer(long offset) {
@@ -233,5 +284,5 @@
 
 
-        @Override public final void putData(final long offset, final byte[] value, final int size) {
+        @Override public void putData(final long offset, final byte[] src, int srcPos, int srcSize){
             final ByteBuffer b1 = internalByteBuffer(offset);
             final int bufPos = (int) (offset% BUF_SIZE);
@@ -239,5 +290,5 @@
             synchronized (b1){
                 b1.position(bufPos);
-                b1.put(value, 0, size);
+                b1.put(src, srcPos, srcSize);
             }
         }
@@ -264,4 +315,7 @@
             try{
                 return internalByteBuffer(offset).getInt((int) (offset% BUF_SIZE));
+            } catch (NullPointerException e) {
+                throw new RuntimeException(""+offset,e);
+
             }catch(IndexOutOfBoundsException e){
                 throw new IOError(new EOFException());
@@ -346,4 +400,7 @@
         protected final FileChannel.MapMode mapMode;
         protected final java.io.RandomAccessFile raf;
+
+        protected final Map<ByteBuffer, String> unreleasedBuffers =
+                Utils.isWindows() ? new WeakHashMap<ByteBuffer, String>() : null;
 
         static final int BUF_SIZE_INC = 1024*1024;
@@ -370,7 +427,7 @@
                 }else{
                     buffers = new ByteBuffer[1];
-                    buffers[0] = fileChannel.map(mapMode, 0, INITIAL_SIZE);
-                    if(mapMode == FileChannel.MapMode.READ_ONLY)
-                        buffers[0] = buffers[0].asReadOnlyBuffer();
+//                    buffers[0] = fileChannel.map(mapMode, 0, INITIAL_SIZE);
+//                    if(mapMode == FileChannel.MapMode.READ_ONLY)
+//                        buffers[0] = buffers[0].asReadOnlyBuffer();
 
                 }
@@ -394,4 +451,12 @@
                 }
                 buffers = null;
+                if(unreleasedBuffers!=null){
+                    for(ByteBuffer b:unreleasedBuffers.keySet().toArray(new MappedByteBuffer[0])){
+                        if(b!=null && (b instanceof MappedByteBuffer)){
+                            unmap((MappedByteBuffer) b);
+                        }
+                    }
+                }
+
             } catch (IOException e) {
                 throw new IOError(e);
@@ -428,18 +493,11 @@
 
         @Override
-        protected ByteBuffer makeNewBuffer(long offset) {
-            try {
-                //unmap old buffer on windows
-                int bufPos = (int) (offset/BUF_SIZE);
-                if(bufPos<buffers.length && buffers[bufPos]!=null){
-                    unmap((MappedByteBuffer) buffers[bufPos]);
-                    buffers[bufPos] = null;
-                }
-
+        protected ByteBuffer makeNewBuffer(long offset, ByteBuffer[] buffers2) {
+            try {
                 long newBufSize =  offset% BUF_SIZE;
                 newBufSize = newBufSize + newBufSize%BUF_SIZE_INC; //round to BUF_SIZE_INC
-                return fileChannel.map(
-                        mapMode,
-                        offset - offset% BUF_SIZE, newBufSize );
+                ByteBuffer buf =  fileChannel.map( mapMode, offset - offset% BUF_SIZE, newBufSize );
+                if(unreleasedBuffers!=null) unreleasedBuffers.put(buf, "");
+                return buf;
             } catch (IOException e) {
                 if(e.getCause()!=null && e.getCause() instanceof OutOfMemoryError){
@@ -463,11 +521,12 @@
             super(false);
             this.useDirectBuffer = useDirectBuffer;
-            ByteBuffer b0 = useDirectBuffer?
-                    ByteBuffer.allocateDirect(INITIAL_SIZE) :
-                    ByteBuffer.allocate(INITIAL_SIZE);
-            buffers = new ByteBuffer[]{b0};
-        }
-
-        @Override protected ByteBuffer makeNewBuffer(long offset) {
+//            ByteBuffer b0 = useDirectBuffer?
+//                    ByteBuffer.allocateDirect(INITIAL_SIZE) :
+//                    ByteBuffer.allocate(INITIAL_SIZE);
+//            buffers = new ByteBuffer[]{b0};
+            buffers=new ByteBuffer[1];
+        }
+
+        @Override protected ByteBuffer makeNewBuffer(long offset, ByteBuffer[] buffers2) {
             final int newBufSize = Utils.nextPowTwo((int) (offset % BUF_SIZE));
             //double size of existing in-memory-buffer
@@ -476,5 +535,5 @@
                     ByteBuffer.allocate(newBufSize);
             final int buffersPos = (int) (offset/ BUF_SIZE);
-            final ByteBuffer oldBuffer = buffers[buffersPos];
+            final ByteBuffer oldBuffer = buffers2[buffersPos];
             if(oldBuffer!=null){
                 //copy old buffer if it exists
@@ -580,11 +639,11 @@
 
         @Override
-        synchronized public void putData(long offset, byte[] value, int size) {
-            try {
-                if(pos!=offset){
-                    raf.seek(offset);
-                }
-                pos=offset+size;
-                raf.write(value,0,size);
+        synchronized public void putData(final long offset, final byte[] src, int srcPos, int srcSize){
+            try {
+                if(pos!=offset){
+                    raf.seek(offset);
+                }
+                pos=offset+srcSize;
+                raf.write(src,srcPos,srcSize);
             } catch (IOException e) {
                 throw new IOError(e);
@@ -602,5 +661,5 @@
                 byte[] b = new byte[size];
                 buf.get(b);
-                putData(offset, b, size);
+                putData(offset, b, 0, size);
             } catch (IOException e) {
                 throw new IOError(e);
@@ -708,148 +767,4 @@
     }
 
-    public static class AsyncFileChannelVol extends Volume{
-
-
-        protected AsynchronousFileChannel channel;
-        protected final boolean readOnly;
-        protected final File file;
-
-        public AsyncFileChannelVol(File file, boolean readOnly){
-            this.readOnly = readOnly;
-            this.file = file;
-            try {
-                this.channel = readOnly?
-                        AsynchronousFileChannel.open(file.toPath(),StandardOpenOption.READ):
-                        AsynchronousFileChannel.open(file.toPath(),StandardOpenOption.READ, StandardOpenOption.WRITE);
-
-            } catch (IOException e) {
-                throw new IOError(e);
-            }
-        }
-
-        @Override
-        public void ensureAvailable(long offset) {
-            //we do not have a list of ByteBuffers, so ensure size does not have to do anything
-        }
-
-
-
-        protected void await(Future<Integer> future, int size) {
-            try {
-                int res = future.get();
-                if(res!=size) throw new InternalError("not enough bytes");
-            } catch (InterruptedException e) {
-                throw new RuntimeException(e);
-            } catch (ExecutionException e) {
-                throw new RuntimeException(e);
-            }
-        }
-
-        @Override
-        public void putByte(long offset, byte value) {
-            ByteBuffer b = ByteBuffer.allocate(1);
-            b.put(0, value);
-            await(channel.write(b, offset),1);
-        }
-        @Override
-        public void putInt(long offset, int value) {
-            ByteBuffer b = ByteBuffer.allocate(4);
-            b.putInt(0, value);
-            await(channel.write(b, offset),4);
-        }
-
-        @Override
-        public void putLong(long offset, long value) {
-            ByteBuffer b = ByteBuffer.allocate(8);
-            b.putLong(0, value);
-            await(channel.write(b, offset),8);
-        }
-
-        @Override
-        public void putData(long offset, byte[] value, int size) {
-            ByteBuffer b = ByteBuffer.wrap(value);
-            b.limit(size);
-            await(channel.write(b,offset),size);
-        }
-
-        @Override
-        public void putData(long offset, ByteBuffer buf) {
-            await(channel.write(buf,offset), buf.limit() - buf.position());
-        }
-
-
-
-        @Override
-        public long getLong(long offset) {
-            ByteBuffer b = ByteBuffer.allocate(8);
-            await(channel.read(b, offset), 8);
-            b.rewind();
-            return b.getLong();
-        }
-
-        @Override
-        public byte getByte(long offset) {
-            ByteBuffer b = ByteBuffer.allocate(1);
-            await(channel.read(b, offset), 1);
-            b.rewind();
-            return b.get();
-        }
-
-        @Override
-        public int getInt(long offset) {
-            ByteBuffer b = ByteBuffer.allocate(4);
-            await(channel.read(b, offset), 4);
-            b.rewind();
-            return b.getInt();
-        }
-
-
-
-        @Override
-        public DataInput2 getDataInput(long offset, int size) {
-            ByteBuffer b = ByteBuffer.allocate(size);
-            await(channel.read(b, offset), size);
-            b.rewind();
-            return new DataInput2(b,0);
-        }
-
-        @Override
-        public void close() {
-            try {
-                channel.close();
-            } catch (IOException e) {
-                throw new IOError(e);
-            }
-        }
-
-        @Override
-        public void sync() {
-            try {
-                channel.force(true);
-            } catch (IOException e) {
-                throw new IOError(e);
-            }
-        }
-
-        @Override
-        public boolean isEmpty() {
-            return file.length()>0;
-        }
-
-        @Override
-        public void deleteFile() {
-            file.delete();
-        }
-
-        @Override
-        public boolean isSliced() {
-            return false;
-        }
-
-        @Override
-        public File getFile() {
-            return file;
-        }
-    }
 
 
Index: /applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/DBTile.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/DBTile.java	(revision 29484)
+++ /applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/DBTile.java	(revision 29484)
@@ -0,0 +1,24 @@
+package org.openstreetmap.josm.plugins.imagerycache;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Serializable class to store one tile
+ */
+public class DBTile implements Serializable {
+    public byte[] data;
+    public Map<String, String> metaData;
+    public long lastModified;
+
+    public DBTile(DBTile dbTile) {
+        data = dbTile.data.clone();
+        metaData = new HashMap<String, String>(dbTile.metaData);
+        lastModified = dbTile.lastModified;
+    }
+
+    public DBTile() {
+    }
+    
+}
Index: /applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/ImageryCachePlugin.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/ImageryCachePlugin.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/ImageryCachePlugin.java	(revision 29484)
@@ -17,4 +17,10 @@
         public OsmTileLoader makeTileLoader(TileLoaderListener listener) {
             String cachePath = TMSLayer.PROP_TILECACHE_DIR.get();
+            try {
+                new File(cachePath).mkdirs();
+            } catch (Exception e) {
+                cachePath=".";
+            }
+            
             if (cachePath != null && !cachePath.isEmpty()) {
                 return new OsmDBTilesLoader(listener, new File(cachePath));
@@ -29,3 +35,6 @@
     }
     
+    public static void main(String[] args) {
+        System.out.println("Debugging code for ImageryAdjust plugin");
+    }
 }
Index: /applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/OsmDBTilesLoader.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/OsmDBTilesLoader.java	(revision 29483)
+++ /applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/OsmDBTilesLoader.java	(revision 29484)
@@ -6,13 +6,9 @@
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.Serializable;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.net.URLConnection;
-import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
-import org.mapdb.DB;
-import org.mapdb.DBMaker;
 import org.openstreetmap.gui.jmapviewer.JobDispatcher;
 import org.openstreetmap.gui.jmapviewer.OsmTileLoader;
@@ -22,4 +18,5 @@
 import org.openstreetmap.gui.jmapviewer.interfaces.TileSource;
 import org.openstreetmap.gui.jmapviewer.interfaces.TileSource.TileUpdate;
+import org.openstreetmap.josm.Main;
 import org.openstreetmap.josm.data.preferences.BooleanProperty;
 
@@ -35,77 +32,6 @@
     
     public static final boolean debug = new BooleanProperty("imagerycache.debug", false).get();
-
-    static class TileDAOMapDB {
-        protected HashMap<String, DB> dbs = new HashMap<String, DB>();
-        protected HashMap<String, Map<Long,DBTile>> storages  = new HashMap<String, Map<Long,DBTile>>();
-        private final File cacheFolder;
-        
-        /**
-         * Lazy creation of DB object associated to * @param source
-         * or returning from cache
-         */
-        private synchronized DB getDB(String source) {
-            DB db = dbs.get(source);
-            if (db==null) {
-                try {
-                db = DBMaker
-                    .newFileDB(new File(cacheFolder, "tiles_"+source.replaceAll("[\\\\/:*?\"<>| ]", "_")))
-                    .randomAccessFileEnableIfNeeded()
-                    .journalDisable()
-                    .closeOnJvmShutdown()
-                    .make();
-                dbs.put(source, db);
-                } catch (Exception e) {
-                    System.out.println("Error: Can not create MapDB file");
-                    e.printStackTrace(System.out);
-                }
-            }
-            return db;
-        }
-
-        private synchronized Map<Long,DBTile> getStorage(String source) {
-            Map<Long, DBTile> m = storages.get(source);
-            if (m == null) {
-                try {
-                    DB d = getDB(source);
-                    m = d.getHashMap("tiles");
-                    storages.put(source, m);
-                    if (debug) System.out.println("Created storage "+source);
-                } catch (Exception e) {
-                    System.out.println("Error: Can not create HashMap in MapDB storage");
-                    e.printStackTrace(System.out);
-                }
-            }
-            return m;
-        }
-        
-        public TileDAOMapDB(File cacheFolder) {
-            this.cacheFolder = cacheFolder;
-        }
-        
-                
-        DBTile getById(String source, long id) {
-            return getStorage(source).get(id);
-        }
-
-        protected void updateModTime(String source, long id, DBTile dbTile) {
-            if (debug) System.out.println("Tile "+id+": Updating modification time");
-            getStorage(source).put(id, dbTile);
-        }
-
-        protected void updateTile(String source, long id, DBTile dbTile) {
-            if (debug) System.out.println("Tile "+id+": Updating tile in base");
-            getStorage(source).put(id, dbTile);
-        }
-
-        protected void deleteTile(String source, long id) {
-            getStorage(source).remove(id);
-        }
-
-
-    }
             
     TileDAOMapDB dao;
-                        
    
     protected long maxCacheFileAge = FILE_AGE_ONE_WEEK;
@@ -115,5 +41,6 @@
     public OsmDBTilesLoader(TileLoaderListener smap, File cacheFolder) {
         super(smap);
-        dao = new TileDAOMapDB(cacheFolder);
+        dao = TileDAOMapDB.getInstance();
+        dao.setCacheFolder(cacheFolder);
     }
     
@@ -122,10 +49,4 @@
         return new DatabaseLoadJob(tile);
     }
-    
-    static class DBTile implements Serializable {
-        byte data[];
-        Map<String, String> metaData;
-        long lastModified;
-    }
 
     protected class DatabaseLoadJob implements TileJob {
@@ -133,7 +54,10 @@
         private final Tile tile;
         File tileCacheDir;
+        
+        /**
+         * Stores the tile loaded from database, null if nothing found. 
+         */
         DBTile dbTile = null;
         long fileAge = 0;
-        boolean fileTilePainted = false;
         
         long id;
@@ -161,10 +85,10 @@
                 return;
             }
-            if (fileTilePainted) {
+            if (dbTile != null) {
                 TileJob job = new TileJob() {
-                    public void run() {
-                        loadOrUpdateTile();
-                    }
-                    public Tile getTile() {
+                    @Override public void run() {
+                        loadOrUpdateTileFromServer();
+                    }
+                    @Override public Tile getTile() {
                         return tile;
                     }
@@ -172,8 +96,13 @@
                 JobDispatcher.getInstance().addJob(job);
             } else {
-                loadOrUpdateTile();
-            }
-        }
-
+                loadOrUpdateTileFromServer();
+            }
+        }
+
+        /**
+         * Loads tile from database. 
+         * There can be dbTile != null but the tile is outdated and reload is still needed
+         * @return true if no loading from server is needed.
+         */
         private boolean loadTileFromFile() {
             ByteArrayInputStream bin = null;
@@ -182,11 +111,12 @@
                 
                 if (dbTile == null) return false;
+                
+                loadMetadata(); 
+                if (debug) System.out.println(id+": found in cache, metadata ="+dbTile.metaData);
 
                 if ("no-tile".equals(tile.getValue("tile-info")))
                 {
                     tile.setError("No tile at this zoom level");
-                    if (dbTile!=null) {
-                        dao.deleteTile(sourceName, id);
-                    }
+                    dao.deleteTile(sourceName, id);
                 } else {
                     bin = new ByteArrayInputStream(dbTile.data);
@@ -202,10 +132,12 @@
                     tile.setLoaded(true);
                     listener.tileLoadingFinished(tile, true);
-                    fileTilePainted = true;
-                    return true;
-                }
-                listener.tileLoadingFinished(tile, true);
-                fileTilePainted = true;
+                    return true; // tile loaded
+                } else {
+                    listener.tileLoadingFinished(tile, true);
+                    return false; // Tile is loaded, but too old. Should be reloaded from server
+                }
             } catch (Exception e) {
+                System.out.println("Error: Can not load tile from database: "+sourceName+":"+id);
+                e.printStackTrace(System.out);
                 try {
                     if (bin != null) {
@@ -213,10 +145,13 @@
                         dao.deleteTile(sourceName, id);
                     }
-                } catch (Exception e1) {
-                }
+                } catch (Exception e1) {   }
                 dbTile = null;
                 fileAge = 0;
-            }
-            return false;
+                return false; // tile is not loaded because of some error (corrupted database, etc.)
+            } catch (Error e) { // this is bad, but MapDB throws it
+                System.out.println("Serious database error: Can not load tile from database: "+sourceName+":"+id);
+                e.printStackTrace(System.out);
+                dbTile = null;  fileAge = 0;  return false;                                            
+            }
         }
 
@@ -225,5 +160,5 @@
         }
                 
-        private void loadOrUpdateTile() {
+        private void loadOrUpdateTileFromServer() {
             
             try {
@@ -231,4 +166,6 @@
                 final TileUpdate tileUpdate = tile.getSource().getTileUpdate();
                 if (dbTile != null) {
+                    // MapDB wants immutable entities
+                    dbTile = new DBTile(dbTile);
                     switch (tileUpdate) {
                     case IfModifiedSince:   // (1)
@@ -276,5 +213,5 @@
                 loadTileMetadata(tile, urlConn);
                 dbTile.metaData = tile.getMetadata();
-
+                
                 if ("no-tile".equals(tile.getValue("tile-info")))
                 {
@@ -305,5 +242,5 @@
                 listener.tileLoadingFinished(tile, false);
                 try {
-                    System.out.println("Tile "+id+": Error: Failed loading from "+tile.getUrl());
+                    System.out.println("Error: Tile "+id+" can not be loaded from"+tile.getUrl());
                     e.printStackTrace(System.out);
                 } catch(IOException i) {
@@ -381,5 +318,14 @@
         }
 
+        /**
+         * Loads attribute map from dbTile to tile
+         */
+        private void loadMetadata() {
+            Map<String,String> m = dbTile.metaData;
+            if (m==null) return;
+            for (String k: m.keySet()) {
+                tile.putValue(k, m.get(k));
+            }
+        }
     }
-    
 }
Index: /applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/TileDAO.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/TileDAO.java	(revision 29484)
+++ /applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/TileDAO.java	(revision 29484)
@@ -0,0 +1,12 @@
+package org.openstreetmap.josm.plugins.imagerycache;
+
+/**
+ * Interface that contains methods to work with the tile database.
+ * Each tile is described by a source name and an id (a unique tile identifier).
+ */
+public interface TileDAO {
+    public DBTile getById(String source, long id);
+    public void updateModTime(String source, long id, DBTile dbTile);
+    public void updateTile(String source, long id, DBTile dbTile);
+    public void deleteTile(String source, long id);
+}
Index: /applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/TileDAOMapDB.java
===================================================================
--- /applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/TileDAOMapDB.java	(revision 29484)
+++ /applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/TileDAOMapDB.java	(revision 29484)
@@ -0,0 +1,184 @@
+package org.openstreetmap.josm.plugins.imagerycache;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import org.mapdb.DB;
+import org.mapdb.DBMaker;
+import org.openstreetmap.josm.data.preferences.BooleanProperty;
+
+/**
+ * Class to store tile database in MapDB key-value storage
+ * Used as a singleton to share database-accessing objects between all tile loaders (all layers, etc.)
+ */
+public class TileDAOMapDB implements TileDAO {
+    public static final boolean debug = new BooleanProperty("imagerycache.debug", false).get();
+    
+    public static boolean dbNotAvailable = false;
+
+    protected HashMap<String, DB> dbs = new HashMap<String, DB>();
+    protected HashMap<String, Map<Long, DBTile>> storages = new HashMap<String, Map<Long, DBTile>>();
+    private File cacheFolder;
+    
+    
+    static TileDAOMapDB instance = new TileDAOMapDB();
+
+    public static TileDAOMapDB getInstance() {
+        return instance;
+    }
+
+    /**
+     * Lazily creates the DB object associated with the given source,
+     * or returns it from the cache if already open.
+     */
+    private synchronized DB initDB(String source) {
+        DB db = dbs.get(source);
+        if (db!=null) return db;
+        
+        File f = null;
+        for (int attempt=0; attempt<20; attempt++) {
+            try {
+                String fname = getDBFileName(source, attempt);
+                
+                
+                f = new File(cacheFolder, fname);
+                f.createNewFile();
+                if (f.exists() && !f.canWrite()) continue;
+         
+                File lock;
+                lock = new File(cacheFolder,  fname+".lock");
+                if (!lock.createNewFile())  continue;
+                lock.deleteOnExit();
+                
+                db = DBMaker.newFileDB(f)
+                    .randomAccessFileEnableIfNeeded()
+                    .writeAheadLogDisable().closeOnJvmShutdown().make();
+                
+                
+                dbs.put(source, db);
+                
+                Map<Long, DBTile> m = db.getHashMap("tiles");
+                storages.put(source, m);
+                
+                System.out.println("Opened database file successfully: "+f.getAbsolutePath());
+            } catch (Exception ex) {
+                System.out.println("Error: can not create database, file: "+f.getAbsolutePath());
+                //System.out.println(ex.getMessage());
+                ex.printStackTrace();
+                try {
+                    if (db!=null) db.close();
+                } catch (Exception e) { };
+                throw new RuntimeException(ex);
+            }
+            mergeSources(source);
+            return db;
+        }
+        
+        // Fallback solution: 
+        db = DBMaker.newMemoryDB().asyncWriteDisable()
+            .writeAheadLogDisable().closeOnJvmShutdown().make();
+
+        dbs.put(source, db);
+
+        Map<Long, DBTile> m = db.getHashMap("tiles");
+        storages.put(source, m);
+        return db;
+        
+    }
+
+    private synchronized Map<Long, DBTile> getStorage(String source) {
+        if (!storages.containsKey(source)) {
+            initDB(source);
+        }
+        Map<Long, DBTile> m = storages.get(source);
+        return m;
+    }
+
+    private TileDAOMapDB() {
+        
+    }
+    
+    void setCacheFolder(File f) {
+        cacheFolder = f;
+    }
+
+    @Override
+    public DBTile getById(String source, long id) {
+        if (dbNotAvailable) return null;
+       return getStorage(source).get(id);
+    }
+
+    @Override
+    public synchronized void updateModTime(String source, long id, DBTile dbTile) {
+        if (debug) System.out.println("Tile " + id + ": Updating modification time");
+        if (dbNotAvailable) return;
+        getStorage(source).put(id, dbTile);
+    }
+
+    @Override
+    public synchronized void  updateTile(String source, long id, DBTile dbTile) {
+        if (debug) System.out.println("Tile " + id + ": Updating tile in base");
+        if (dbNotAvailable) return;
+        getStorage(source).put(id, dbTile);
+    }
+
+    @Override
+    public synchronized void deleteTile(String source, long id) {
+        if (dbNotAvailable) return;
+        getStorage(source).remove(id);
+    }
+    
+    
+
+    private String getDBFileName(String source, int attempt) {
+        String fname = "tiles_" + source.replaceAll("[\\\\/:*?\"<>| ]", "_");
+        if (attempt > 0) fname=fname+"_"+attempt;
+        return fname;
+    }
+    
+    /**
+     * Opens all files related to the given source as databases
+     * and moves their contents to the currently active database.
+     * Used for joining the cache from multiple instances of JOSM
+     */
+    private void mergeSources(String source) {
+        DB db = null;
+        Map<Long, DBTile> myMap = storages.get(source);
+        for (int t=0; t<20; t++) {
+            try {
+                String fname = getDBFileName(source, t);
+                File f = new File(cacheFolder, fname);
+                if (!f.exists() || !f.canWrite()) continue;
+                
+                File lock;
+                lock = new File(cacheFolder, fname+".lock");
+                if (lock.exists())  continue;
+                
+                db = DBMaker.newFileDB(f)
+                    .randomAccessFileEnableIfNeeded()
+                    .writeAheadLogDisable().closeOnJvmShutdown().make();
+                
+                
+                Map<Long, DBTile> m = db.getHashMap("tiles");
+                
+                // Merging!
+                System.out.println("Moving records from "+f.getName()+" to open storage "+source);
+                myMap.putAll(m);
+                db.close();
+                new File(cacheFolder, fname+".p").delete();
+                f.delete();
+                
+                System.out.println("Moved database successfully from file "+f.getAbsolutePath());
+            } catch (Exception ex) {
+                System.out.println(ex.getMessage());
+                try {
+                    if (db!=null) db.close();
+                } catch (Exception e) { };
+            }
+        }
+    }
+
+}
