2 * The Apache Software License, Version 1.1
5 * Copyright (c) 1999 The Apache Software Foundation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
19 * 3. The end-user documentation included with the redistribution,
20 * if any, must include the following acknowledgment:
21 * "This product includes software developed by the
22 * Apache Software Foundation (http://www.apache.org/)."
23 * Alternately, this acknowledgment may appear in the software itself,
24 * if and wherever such third-party acknowledgments normally appear.
26 * 4. The names "Xindice" and "Apache Software Foundation" must
27 * not be used to endorse or promote products derived from this
28 * software without prior written permission. For written
29 * permission, please contact apache@apache.org.
31 * 5. Products derived from this software may not be called "Apache",
32 * nor may "Apache" appear in their name, without prior written
33 * permission of the Apache Software Foundation.
35 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
36 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
37 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
38 * DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
39 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
42 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
43 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
44 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
45 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
47 * ====================================================================
49 * This software consists of voluntary contributions made by many
50 * individuals on behalf of the Apache Software Foundation and was
51 * originally based on software copyright (c) 1999-2001, The dbXML
52 * Group, L.L.C., http://www.dbxmlgroup.com. For more
53 * information on the Apache Software Foundation, please see
54 * <http://www.apache.org/>.
57 package net.jxta.impl.xindice.core.filer;
59 import net.jxta.impl.xindice.core.DBException;
60 import net.jxta.impl.xindice.core.FaultCodes;
61 import net.jxta.impl.xindice.core.data.Key;
62 import net.jxta.impl.xindice.core.data.Value;
64 import java.io.ByteArrayInputStream;
65 import java.io.ByteArrayOutputStream;
66 import java.io.DataInputStream;
67 import java.io.DataOutputStream;
69 import java.io.IOException;
70 import java.io.InputStream;
71 import java.io.OutputStream;
72 import java.io.RandomAccessFile;
73 import java.lang.ref.WeakReference;
74 import java.util.Collection;
75 import java.util.EmptyStackException;
76 import java.util.HashMap;
78 import java.util.Stack;
79 import java.util.WeakHashMap;
80 import java.util.logging.Level;
81 import java.util.logging.Logger;
84 * Paged is a paged file implementation that is foundation for both the
85 * BTree class and the HashFiler. It provides flexible paged I/O and
86 * page caching functionality.
88 * Page has the following configuration attributes:
90 * <li><strong>pagesize</strong>: Size of the page used by the paged file.
91 * Default page size is 4096 bytes. This parameter can be set only
92 * before paged file is created. Once it is created, this parameter
93 * can not be changed.</li>
94 * <li><strong>maxkeysize</strong>: Maximum allowed size of the key.
95 * Default maximum key size is 256 bytes.</li>
96 * <li><strong>max-descriptors</strong>: Defines maximum amount of
97 * simultaneously opened file descriptors this paged file can have.
98 * Several descriptors are needed to provide multithreaded access
99 * to the underlying file. Too large number will limit amount of
100 * collections you can open. Default value is 16
101 * (DEFAULT_DESCRIPTORS_MAX).</li>
104 * <br>FIXME: Currently it seems that maxkeysize is not used anywhere.
105 * <br>TODO: Introduce Paged interface, implementations.
107 public abstract class Paged {
112 private final static Logger LOG = Logger.getLogger(Paged.class.getName());
115 * The maximum number of pages that will be held in the dirty cache.
116 * Once number reaches the limit, pages are flushed to disk.
118 private static final int MAX_DIRTY_SIZE = 128;
120 // The maximum number of open random access files we can have
121 private static final int DEFAULT_DESCRIPTORS_MAX = 16;
126 protected static final byte UNUSED = 0;
129 * Overflow page status
131 protected static final byte OVERFLOW = 126;
134 * Deleted page status
136 protected static final byte DELETED = 127;
139 * Page ID of non-existent page
141 protected static final int NO_PAGE = -1;
144 * flag whether to sync DB on every write or not.
146 protected boolean sync = true;
148 // TODO: This is not a cache right now, but a way to assure that only one page instance at most exists in memory at all times.
150 * Cache of recently read pages.
152 * Cache contains weak references to the Page objects, keys are page numbers (Long objects).
153 * Access synchronized by this map itself.
155 private final Map<Long, WeakReference<Page>> pages = new WeakHashMap<Long, WeakReference<Page>>();
158 * Cache of modified pages waiting to be written out.
159 * Access is synchronized by the {@link #dirtyLock}.
161 private Map<Long, Page> dirty = new HashMap<Long, Page>();
164 * Lock for synchronizing access to the {@link #dirty} map.
166 private final Object dirtyLock = new Object();
169 * Random access file descriptors cache.
170 * Access to it and to {@link #descriptorsCount} is synchronized by itself.
172 private final Stack<RandomAccessFile> descriptors = new Stack<RandomAccessFile>();
175 * The number of random access file objects that exist, either in the
176 * cache {@link #descriptors}, or currently in use.
178 private int descriptorsCount;
181 * The maximum number of random access file objects that can be opened
182 * by this paged instance.
184 private int descriptorsMax;
187 * Whether the file is opened or not.
189 private boolean opened;
192 * The underlying file where the Paged object stores its pages.
197 * Header of this Paged
199 private final FileHeader fileHeader;
202 descriptorsMax = DEFAULT_DESCRIPTORS_MAX;
203 fileHeader = createFileHeader();
206 public Paged(File file) {
212 * setFile sets the file object for this Paged.
214 * @param file The File
216 protected final void setFile(final File file) {
221 * getFile returns the file object for this Paged.
225 protected final File getFile() {
230 * Obtain RandomAccessFile ('descriptor') object out of the pool.
231 * If no descriptors available, and maximum amount already allocated,
232 * the call will block.
234 * @throws java.io.IOException if an io error occurs
236 protected final RandomAccessFile getDescriptor() throws IOException {
237 synchronized (descriptors) {
// Fast path: reuse a pooled descriptor when one is available.
238 // If there are descriptors in the cache return one.
239 if (!descriptors.empty()) {
240 return descriptors.pop();
242 // Otherwise we need to get one some other way.
244 // First try to create a new one if there's room
// Open a fresh handle while under the descriptorsMax cap.
// NOTE(review): the descriptorsCount increment is not visible in this
// view of the file -- presumably it happens before the return below;
// confirm against the full source.
245 if (descriptorsCount < descriptorsMax) {
247 return new RandomAccessFile(file, "rw");
250 // Otherwise we have to wait for one to be released by another thread.
// Both exceptions are deliberately swallowed so the (elided) wait loop
// retries until putDescriptor() pushes a descriptor back and notifies.
254 return descriptors.pop();
255 } catch (InterruptedException e) {// Ignore, and continue to wait
256 } catch (EmptyStackException e) {// Ignore, and continue to wait
263 * Puts a RandomAccessFile ('descriptor') back into the descriptor pool.
264 * @param raf the file to add
266 protected final void putDescriptor(RandomAccessFile raf) {
268 synchronized (descriptors) {
269 descriptors.push(raf);
270 descriptors.notify();
276 * Closes a RandomAccessFile ('descriptor') and removes it from the pool.
277 * @param raf the file to close
279 protected final void closeDescriptor(RandomAccessFile raf) {
283 } catch (IOException e) {// Ignore close exception
286 // Synchronization is necessary as decrement operation is not atomic
287 synchronized (descriptors) {
294 * getPage returns the page specified by pageNum.
296 * @param pageNum The Page number
297 * @return The requested Page
298 * @throws IOException if an Exception occurs
300 protected final Page getPage(long pageNum) throws IOException {
// Box once so the same Long is used for both map lookups below.
301 final Long lp = pageNum;
304 synchronized (this) {
305 // Check if it's in the dirty cache
306 // No need to synchronize on dirtyLock thanks to atomic assignment
307 page = dirty.get(lp);
309 // if not check if it's already loaded in the page cache
// pages holds weak references, so a cached entry may have been
// garbage-collected; the elided lines presumably dereference ref.get()
// and fall through on null -- confirm against the full source.
311 WeakReference<Page> ref = pages.get(lp);
318 // if still not found we need to create it and add it to the page cache.
321 pages.put(page.pageNum, new WeakReference<Page>(page));
325 // Load the page from disk if necessary
325 // Load the page from disk if necessary
331 * readValue reads the multi-Paged Value starting at the specified
334 * @param page The starting Page
336 * @throws IOException if an Exception occurs
338 protected final Value readValue(Page page) throws IOException {
// The record length from the first page's header pre-sizes the buffer
// so multi-page values are collected without repeated buffer growth.
339 final PageHeader sph = page.getPageHeader();
340 ByteArrayOutputStream bos = new ByteArrayOutputStream(sph.getRecordLen());
342 // Loop until we've read all the pages into memory.
346 PageHeader ph = p.getPageHeader();
348 // Add the contents of the page onto the stream
351 // Continue following the list of pages until we get to the end.
352 long nextPage = ph.getNextPage();
// NO_PAGE terminates the overflow-page chain.
354 if (nextPage == NO_PAGE) {
357 p = getPage(nextPage);
360 // Return a Value with the collected contents of all pages.
361 return new Value(bos.toByteArray());
365 * readValue reads the multi-Paged Value starting at the specified
368 * @param page The starting page number
370 * @throws IOException if an Exception occurs
372 protected final Value readValue(long page) throws IOException {
373 return readValue(getPage(page));
377 * writeValue writes the multi-Paged Value starting at the specified
380 * @param page The starting Page
381 * @param value The Value to write
382 * @throws IOException if an Exception occurs
384 protected final void writeValue(Page page, Value value) throws IOException {
386 throw new IOException("Can't write a null value")
389 InputStream is = value.getInputStream();
391 // Write as much as we can onto the primary page.
392 PageHeader hdr = page.getPageHeader();
// The full value length is recorded on the FIRST page only, so readers
// can pre-size their buffer (see readValue).
394 hdr.setRecordLen(value.getLength());
397 // Write out the rest of the value onto any needed overflow pages
398 while (is.available() > 0) {
400 PageHeader lhdr = hdr;
402 // Find an overflow page to use
403 long np = lhdr.getNextPage();
406 // Use an existing page.
409 // Create a new overflow page
410 page = getFreePage();
// Link the new overflow page onto the previous page's chain.
411 lhdr.setNextPage(page.getPageNum());
414 // Mark the page as an overflow page.
415 hdr = page.getPageHeader();
416 hdr.setStatus(OVERFLOW);
418 // Write some more of the value to the overflow page.
423 // Cleanup any unused overflow pages. i.e. the value is smaller than the
424 // last time it was written.
425 long np = hdr.getNextPage();
// Truncate the chain at the last page actually used.
431 hdr.setNextPage(NO_PAGE);
436 * writeValue writes the multi-Paged Value starting at the specified
439 * @param page The starting page number
440 * @param value The Value to write
441 * @throws IOException if an Exception occurs
443 protected final void writeValue(long page, Value value) throws IOException {
444 writeValue(getPage(page), value);
448 * unlinkPages unlinks a set of pages starting at the specified Page.
450 * @param page The starting Page to unlink
451 * @throws IOException if an Exception occurs
453 protected final void unlinkPages(Page page) throws IOException {
454 // Handle the page if it's in primary space by setting its status to
455 // DELETED and freeing any overflow pages linked to it.
456 if (page.pageNum < fileHeader.pageCount) {
457 long nextPage = page.header.nextPage;
459 page.header.setStatus(DELETED);
460 page.header.setNextPage(NO_PAGE);
463 // See if there are any chained pages from the page that was just removed
464 if (nextPage == NO_PAGE) {
// Continue below with the first overflow page of the chain.
467 page = getPage(nextPage);
471 // Add any overflow pages to the list of free pages.
473 // Get the first and last page in the chain.
474 long firstPage = page.pageNum;
// Walk to the end of the overflow chain to find its tail.
476 while (page.header.nextPage != NO_PAGE) {
477 page = getPage(page.header.nextPage);
479 long lastPage = page.pageNum;
// Splice the whole chain onto the file's free-page list atomically
// with respect to other fileHeader mutations.
482 synchronized (fileHeader) {
483 // If there are already some free pages, add the start of the chain
484 // to the list of free pages.
485 if (fileHeader.lastFreePage != NO_PAGE) {
486 Page p = getPage(fileHeader.lastFreePage);
488 p.header.setNextPage(firstPage);
492 // Otherwise set the chain as the list of free pages.
493 if (fileHeader.firstFreePage == NO_PAGE) {
494 fileHeader.setFirstFreePage(firstPage);
497 // Add a reference to the end of the chain.
498 fileHeader.setLastFreePage(lastPage);
504 * unlinkPages unlinks a set of pages starting at the specified
507 * @param pageNum The starting page number to unlink
508 * @throws IOException if an Exception occurs
510 protected final void unlinkPages(long pageNum) throws IOException {
511 unlinkPages(getPage(pageNum));
515 * getFreePage returns the first free Page from secondary storage.
516 * If no Pages are available, the file is grown as appropriate.
518 * @return The next free Page
519 * @throws IOException if an Exception occurs
521 protected final Page getFreePage() throws IOException {
524 // Synchronize read and write to the fileHeader.firstFreePage
525 synchronized (fileHeader) {
526 if (fileHeader.firstFreePage != NO_PAGE) {
527 // Steal a deleted page
528 p = getPage(fileHeader.firstFreePage);
// Pop the head of the free-page chain; once the chain empties the
// tail pointer must be cleared as well.
529 fileHeader.setFirstFreePage(p.getPageHeader().nextPage);
530 if (fileHeader.firstFreePage == NO_PAGE) {
531 fileHeader.setLastFreePage(NO_PAGE);
537 // No deleted pages, grow the file
// incTotalCount() reserves the next page number past the current end.
538 p = getPage(fileHeader.incTotalCount());
541 // Initialize The Page Header (Cleanly)
542 p.header.setNextPage(NO_PAGE);
543 p.header.setStatus(UNUSED);
548 * @throws DBException COL_COLLECTION_CLOSED if paged file is closed
550 protected final void checkOpened() throws DBException {
552 throw new FilerException(FaultCodes.COL_COLLECTION_CLOSED, "Filer is closed");
557 * getFileHeader returns the FileHeader
559 * @return The FileHeader
561 public FileHeader getFileHeader() {
565 public boolean exists() {
566 return file.exists();
569 public boolean create() throws DBException {
// Creates the backing file on disk and persists the initial header.
// NOTE(review): the body of the try block is not visible in this view
// of the file; only the error wrapping is shown.
575 } catch (Exception e) {
576 throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR, "Error creating " + file.getName(), e);
580 private void createFile() throws IOException {
581 RandomAccessFile raf = null;
584 raf = getDescriptor();
// Offset of the last byte when the file holds its header plus
// (pageCount + 1) full pages; the write that extends the file to this
// size is elided in this view -- confirm against the full source.
585 long o = fileHeader.headerSize + (fileHeader.pageCount + 1) * fileHeader.pageSize - 1;
594 public boolean open() throws DBException {
// Reads the on-disk file header and marks this filer opened.
// NOTE(review): interior lines are elided in this view; only the
// descriptor acquisition and error wrapping are visible.
595 RandomAccessFile raf = null;
599 raf = getDescriptor();
606 } catch (Exception e) {
607 throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR, "Error opening " + file.getName(), e);
613 public synchronized boolean close() throws DBException {
616 // First of all, mark as closed to prevent operations
620 synchronized (descriptors) {
621 final int total = descriptorsCount;
623 // Close descriptors in cache
624 while (!descriptors.empty()) {
625 closeDescriptor(descriptors.pop());
627 // Attempt to close descriptors in use. Max wait time = 0.5s * MAX_DESCRIPTORS
628 int n = descriptorsCount;
630 while (descriptorsCount > 0 && n > 0) {
632 descriptors.wait(500);
633 } catch (InterruptedException woken) {
// NOTE(review): Thread.interrupted() clears and drops the interrupt
// flag; restoring it via Thread.currentThread().interrupt() would be
// the conventional fix -- confirm before changing behavior.
634 Thread.interrupted();
637 if (descriptors.isEmpty()) {
// A descriptor was returned while we waited; close it too.
640 closeDescriptor(descriptors.pop());
643 if (descriptorsCount > 0) {
// Give up after the bounded wait and report the stragglers.
644 LOG.fine(descriptorsCount + " out of " + total + " files were not closed during close.");
647 } catch (Exception e) {
648 // Failed to close, leave open
650 throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR, "Error closing " + file.getName(), e);
656 public boolean isOpened() {
660 public boolean drop() throws DBException {
// Deletes the backing file; succeeds trivially when it does not exist.
// NOTE(review): lines before the return (likely the try { and a close)
// are elided in this view -- confirm against the full source.
663 return !exists() || getFile().delete();
664 } catch (Exception e) {
665 throw new FilerException(FaultCodes.COL_CANNOT_DROP, "Can't drop " + file.getName(), e);
669 void addDirty(Page page) throws IOException {
670 synchronized (dirtyLock) {
671 dirty.put(page.pageNum, page);
672 if (dirty.size() > MAX_DIRTY_SIZE) {
674 // Too many dirty pages... flush them
676 } catch (Exception e) {
677 throw new IOException(e.getMessage());
683 public void flush() throws DBException {
684 // This method is not synchronized
686 // Error flag/counter
689 // Obtain collection of dirty pages
690 Collection<Page> pages;
// Swap in a fresh map so writers can keep dirtying pages while this
// thread flushes the snapshot taken here.
692 synchronized (dirtyLock) {
693 pages = dirty.values();
694 dirty = new HashMap<Long, Page>();
698 for (Object page : pages) {
699 Page p = (Page) page;
// Per-page failures are logged (and counted in elided lines) rather
// than aborting the whole flush.
703 } catch (Exception e) {
704 LOG.log(Level.WARNING, "Exception while flushing page", e);
// The file header itself is flushed last, only if modified.
710 if (fileHeader.dirty) {
713 } catch (Exception e) {
714 LOG.log(Level.WARNING, "Exception while flushing file header", e);
720 throw new FilerException(FaultCodes.GEN_CRITICAL_ERROR, "Error performing flush! Failed to flush " + error + " pages!");
725 * createFileHeader must be implemented by a Paged implementation
726 * in order to create an appropriate subclass instance of a FileHeader.
728 * @return a new FileHeader
730 public abstract FileHeader createFileHeader();
733 * createFileHeader must be implemented by a Paged implementation
734 * in order to create an appropriate subclass instance of a FileHeader.
736 * @param read If true, reads the FileHeader from disk
737 * @return a new FileHeader
738 * @throws IOException if an exception occurs
740 public abstract FileHeader createFileHeader(boolean read) throws IOException;
743 * createFileHeader must be implemented by a Paged implementation
744 * in order to create an appropriate subclass instance of a FileHeader.
746 * @param pageCount The number of pages to allocate for primary storage
747 * @return a new FileHeader
749 public abstract FileHeader createFileHeader(long pageCount);
752 * createFileHeader must be implemented by a Paged implementation
753 * in order to create an appropriate subclass instance of a FileHeader.
755 * @param pageCount The number of pages to allocate for primary storage
756 * @param pageSize The size of a Page (should be a multiple of a FS block)
757 * @return a new FileHeader
759 public abstract FileHeader createFileHeader(long pageCount, int pageSize);
762 * createPageHeader must be implemented by a Paged implementation
763 * in order to create an appropriate subclass instance of a PageHeader.
765 * @return a new PageHeader
767 public abstract PageHeader createPageHeader();
769 // These are a bunch of utility methods for subclasses
771 public static Value[] insertArrayValue(Value[] vals, Value val, int idx) {
772 Value[] newVals = new Value[vals.length + 1];
775 System.arraycopy(vals, 0, newVals, 0, idx);
778 if (idx < vals.length) {
779 System.arraycopy(vals, idx, newVals, idx + 1, vals.length - idx);
784 public static Value[] deleteArrayValue(Value[] vals, int idx) {
785 Value[] newVals = new Value[vals.length - 1];
788 System.arraycopy(vals, 0, newVals, 0, idx);
790 if (idx < newVals.length) {
791 System.arraycopy(vals, idx + 1, newVals, idx, newVals.length - idx);
796 public static long[] insertArrayLong(long[] vals, long val, int idx) {
797 long[] newVals = new long[vals.length + 1];
800 System.arraycopy(vals, 0, newVals, 0, idx);
803 if (idx < vals.length) {
804 System.arraycopy(vals, idx, newVals, idx + 1, vals.length - idx);
809 public static long[] deleteArrayLong(long[] vals, int idx) {
810 long[] newVals = new long[vals.length - 1];
813 System.arraycopy(vals, 0, newVals, 0, idx);
815 if (idx < newVals.length) {
816 System.arraycopy(vals, idx + 1, newVals, idx, newVals.length - idx);
821 public static int[] insertArrayInt(int[] vals, int val, int idx) {
822 int[] newVals = new int[vals.length + 1];
825 System.arraycopy(vals, 0, newVals, 0, idx);
828 if (idx < vals.length) {
829 System.arraycopy(vals, idx, newVals, idx + 1, vals.length - idx);
834 public static int[] deleteArrayInt(int[] vals, int idx) {
835 int[] newVals = new int[vals.length - 1];
838 System.arraycopy(vals, 0, newVals, 0, idx);
840 if (idx < newVals.length) {
841 System.arraycopy(vals, idx + 1, newVals, idx, newVals.length - idx);
846 public static short[] insertArrayShort(short[] vals, short val, int idx) {
847 short[] newVals = new short[vals.length + 1];
850 System.arraycopy(vals, 0, newVals, 0, idx);
853 if (idx < vals.length) {
854 System.arraycopy(vals, idx, newVals, idx + 1, vals.length - idx);
860 public static short[] deleteArrayShort(short[] vals, int idx) {
861 short[] newVals = new short[vals.length - 1];
864 System.arraycopy(vals, 0, newVals, 0, idx);
866 if (idx < newVals.length) {
867 System.arraycopy(vals, idx + 1, newVals, idx, newVals.length - idx);
874 * Paged file's header
876 public abstract class FileHeader {
877 private boolean dirty = false;
878 private int workSize;
880 private short headerSize;
881 private int pageSize;
882 private long pageCount;
883 private long totalCount;
884 private long firstFreePage = -1;
885 private long lastFreePage = -1;
886 private byte pageHeaderSize = 64;
887 private short maxKeySize = 256;
888 private long recordCount;
890 public FileHeader() {
894 public FileHeader(long pageCount) {
895 this(pageCount, 4096);
898 public FileHeader(long pageCount, int pageSize) {
899 this.pageSize = pageSize;
900 this.pageCount = pageCount;
901 totalCount = pageCount;
902 headerSize = (short) 4096;
906 public FileHeader(boolean read) throws IOException {
912 public synchronized final void read() throws IOException {
913 RandomAccessFile raf = null;
916 raf = getDescriptor();
925 public synchronized void read(RandomAccessFile raf) throws IOException {
// Field order here must mirror write(RandomAccessFile) exactly.
926 headerSize = raf.readShort();
927 pageSize = raf.readInt();
928 pageCount = raf.readLong();
929 totalCount = raf.readLong();
930 firstFreePage = raf.readLong();
931 lastFreePage = raf.readLong();
932 pageHeaderSize = raf.readByte();
933 maxKeySize = raf.readShort();
934 recordCount = raf.readLong();
937 public synchronized final void write() throws IOException {
942 RandomAccessFile raf = null;
945 raf = getDescriptor();
954 public synchronized void write(RandomAccessFile raf) throws IOException {
// Field order here must mirror read(RandomAccessFile) exactly.
955 raf.writeShort(headerSize);
956 raf.writeInt(pageSize);
957 raf.writeLong(pageCount);
958 raf.writeLong(totalCount);
959 raf.writeLong(firstFreePage);
960 raf.writeLong(lastFreePage);
961 raf.writeByte(pageHeaderSize);
962 raf.writeShort(maxKeySize);
963 raf.writeLong(recordCount);
966 public synchronized final void setDirty() {
970 public synchronized final boolean isDirty() {
975 * The size of the FileHeader. Usually 1 OS Page.
976 * This method should be called only while initializing Paged, not during normal processing.
977 * @param headerSize the new header size
979 public synchronized final void setHeaderSize(short headerSize) {
980 this.headerSize = headerSize;
985 * The size of the FileHeader. Usually 1 OS Page
986 * @return the header size
988 public synchronized final short getHeaderSize() {
993 * The size of a page. Usually a multiple of a FS block.
994 * This method should be called only while initializing Paged, not during normal processing.
995 * @param pageSize the new page size
997 public synchronized final void setPageSize(int pageSize) {
998 this.pageSize = pageSize;
1004 * The size of a page. Usually a multiple of a FS block
1005 * @return the page size
1007 public synchronized final int getPageSize() {
1012 * The number of pages in primary storage.
1013 * This method should be called only while initializing Paged, not during normal processing.
1014 * @param pageCount the new page count
1016 public synchronized final void setPageCount(long pageCount) {
1017 this.pageCount = pageCount;
1022 * The number of pages in primary storage
1023 * @return the page count
1025 public synchronized final long getPageCount() {
1030 * The number of total pages in the file.
1031 * This method should be called only while initializing Paged, not during normal processing.
1032 * @param totalCount the new total count
1034 public synchronized final void setTotalCount(long totalCount) {
1035 this.totalCount = totalCount;
1039 public synchronized final long incTotalCount() {
1041 return this.totalCount++;
1045 * The number of total pages in the file
1046 * @return the total count
1048 public synchronized final long getTotalCount() {
1053 * The first free page in unused secondary space
1054 * @param firstFreePage the new first free page
1056 public synchronized final void setFirstFreePage(long firstFreePage) {
1057 this.firstFreePage = firstFreePage;
1062 * The first free page in unused secondary space
1063 * @return the first free page
1065 public synchronized final long getFirstFreePage() {
1066 return firstFreePage;
1070 * The last free page in unused secondary space
1071 * @param lastFreePage sets the last free page
1073 public synchronized final void setLastFreePage(long lastFreePage) {
1074 this.lastFreePage = lastFreePage;
1079 * The last free page in unused secondary space
1080 * @return the last free page
1082 public synchronized final long getLastFreePage() {
1083 return lastFreePage;
1087 * Set the size of a page header.
1089 * Normally, 64 is sufficient.
1090 * @param pageHeaderSize the new page header size
1092 public synchronized final void setPageHeaderSize(byte pageHeaderSize) {
1093 this.pageHeaderSize = pageHeaderSize;
1094 calculateWorkSize();
1099 * Get the size of a page header.
1101 * Normally, 64 is sufficient
1102 * @return the page header size
1104 public synchronized final byte getPageHeaderSize() {
1105 return pageHeaderSize;
1109 * Set the maximum number of bytes a key can be.
1111 * Normally, 256 is good
1112 * @param maxKeySize the new max key size
1114 public synchronized final void setMaxKeySize(short maxKeySize) {
1115 this.maxKeySize = maxKeySize;
1120 * Get the maximum number of bytes.
1122 * Normally, 256 is good.
1123 * @return max key size
1125 public synchronized final short getMaxKeySize() {
1130 * Increment the number of records being managed by the file
1132 public synchronized final void incRecordCount() {
1138 * Decrement the number of records being managed by the file
1140 public synchronized final void decRecordCount() {
1146 * The number of records being managed by the file (not pages)
1147 * @return the record count
1149 public synchronized final long getRecordCount() {
1153 private synchronized void calculateWorkSize() {
1154 workSize = pageSize - pageHeaderSize;
1157 public synchronized final int getWorkSize() {
1167 public abstract class PageHeader implements Streamable {
1168 private boolean dirty = false;
1169 private byte status = UNUSED;
1170 private int keyLen = 0;
1171 private int keyHash = 0;
1172 private int dataLen = 0;
1173 private int recordLen = 0;
1174 private long nextPage = -1;
1176 public PageHeader() {}
1178 public PageHeader(DataInputStream dis) throws IOException {
1182 public synchronized void read(DataInputStream dis) throws IOException {
1183 status = dis.readByte();
// An UNUSED page carries no further header fields to parse.
1185 if (status == UNUSED) {
1189 keyLen = dis.readInt();
// NOTE(review): lines are elided between the reads in this view of the
// file (e.g. around the win98/ME workaround below).
1191 // hack for win98/ME - see issue 564
1194 keyHash = dis.readInt();
1195 dataLen = dis.readInt();
1196 recordLen = dis.readInt();
1197 nextPage = dis.readLong();
1200 public synchronized void write(DataOutputStream dos) throws IOException {
// Field order here must mirror read(DataInputStream) exactly.
1202 dos.writeByte(status);
1203 dos.writeInt(keyLen);
1204 dos.writeInt(keyHash);
1205 dos.writeInt(dataLen);
1206 dos.writeInt(recordLen);
1207 dos.writeLong(nextPage);
1210 public synchronized final boolean isDirty() {
1214 public synchronized final void setDirty() {
1219 * The status of this page (UNUSED, RECORD, DELETED, etc...)
1220 * @param status the new status
1222 public synchronized final void setStatus(byte status) {
1223 this.status = status;
1228 * The status of this page (UNUSED, RECORD, DELETED, etc...)
1229 * @return the status
1231 public synchronized final byte getStatus() {
1235 public synchronized final void setKey(Key key) {
1236 // setKey WIPES OUT the Page data
1239 keyHash = key.getHash();
1240 keyLen = key.getLength();
1245 * The length of the Key
1246 * @param keyLen the new key length
1248 public synchronized final void setKeyLen(int keyLen) {
1249 this.keyLen = keyLen;
1254 * The length of the Key
1255 * @return the key length
1257 public synchronized final int getKeyLen() {
1262 * The hashed value of the Key for quick comparisons
1263 * @param keyHash sets the key hash
1265 public synchronized final void setKeyHash(int keyHash) {
1266 this.keyHash = keyHash;
1271 * The hashed value of the Key for quick comparisons
1272 * @return the key hash
1274 public synchronized final int getKeyHash() {
1279 * The length of the Data
1280 * @param dataLen sets the data length
1282 public synchronized final void setDataLen(int dataLen) {
1283 this.dataLen = dataLen;
1288 * The length of the Data
1289 * @return the data length
1291 public synchronized final int getDataLen() {
1296 * The length of the Record's value
1297 * @param recordLen sets the record length
1299 public synchronized void setRecordLen(int recordLen) {
1300 this.recordLen = recordLen;
1305 * The length of the Record's value
1306 * @return record length
1308 public synchronized final int getRecordLen() {
1313 * The next page for this Record (if overflowed)
1314 * @param nextPage next page
1316 public synchronized final void setNextPage(long nextPage) {
1317 this.nextPage = nextPage;
1322 * The next page for this Record (if overflowed)
1325 public synchronized final long getNextPage() {
1334 public final class Page implements Comparable<Page> {
1339 private final Long pageNum;
1342 * The Header for this Page
1344 private final PageHeader header;
1347 * The offset into the file that this page starts
1349 private final long offset;
1352 * The data for this page. Null if page is not loaded.
1354 private byte[] data;
1357 * The position (relative) of the Key in the data array
1362 * The position (relative) of the Data in the data array
1364 private int dataPos;
/**
 * Creates an in-memory view of page {@code pageNum}; the page data is
 * not loaded until {@link #read()} is called.
 *
 * @param pageNum the page number this object represents
 */
public Page(Long pageNum) {
    this.header = createPageHeader();
    this.pageNum = pageNum;
    // Offset skips the file header, then pageNum fixed-size pages.
    this.offset = fileHeader.headerSize + (pageNum * fileHeader.pageSize);
}
1373 * Reads a page into the memory, once. Subsequent calls are ignored.
1374 * @throws java.io.IOException if an io error occurs
1376 public synchronized void read() throws IOException {
1378 RandomAccessFile raf = null;
1381 byte[] data = new byte[fileHeader.pageSize];
1383 raf = getDescriptor();
1384 raf.seek(this.offset);
// The whole fixed-size page is read into the buffer (read call elided
// in this view), then the header is parsed from its front.
1387 // Read in the header
1388 ByteArrayInputStream bis = new ByteArrayInputStream(data);
1390 this.header.read(new DataInputStream(bis));
// Key bytes start right after the fixed-size page header; the value
// data follows the key.
1392 this.keyPos = fileHeader.pageHeaderSize;
1393 this.dataPos = this.keyPos + this.header.keyLen;
1395 // Successfully read all the data
1404 * Writes out the header into the this.data, and adds a page to the set of
1406 * @throws java.io.IOException if an io error occurs
1408 public void write() throws IOException {
1409 // Write out the header into the this.data
1410 synchronized (this) {
1411 ByteArrayOutputStream bos = new ByteArrayOutputStream(fileHeader.getPageHeaderSize());
1413 header.write(new DataOutputStream(bos));
1414 byte[] b = bos.toByteArray();
// Serialized header overwrites the front of the page buffer.
1416 System.arraycopy(b, 0, data, 0, b.length);
// Actual disk I/O is deferred: the page joins the dirty set and is
// written by flush().
1419 // Add to the list of dirty pages
1420 Paged.this.addDirty(this);
1424 * Flushes content of the dirty page into the file
1425 * @throws java.io.IOException if an io error occurs
1427 public synchronized void flush() throws IOException {
1428 RandomAccessFile raf = null;
1431 raf = getDescriptor();
// When the page lies beyond the current end of file, an offset ~1.5x
// the total page count is computed, apparently to pre-extend the file
// before writing (the extending write itself is elided in this view).
1432 if (this.offset >= raf.length()) {
1434 long o = (fileHeader.headerSize + ((fileHeader.totalCount * 3) / 2) * fileHeader.pageSize)
1435 + (fileHeader.pageSize - 1);
1440 raf.seek(this.offset);
1441 raf.write(this.data);
1450 // No synchronization - pageNum is final
1451 public Long getPageNum() {
1452 return this.pageNum;
1455 // No synchronization - header is final
1456 public PageHeader getPageHeader() {
1460 public synchronized void setKey(Key key) {
1462 // Insert the key into the data array.
1463 key.copyTo(this.data, this.keyPos);
1465 // Set the start of data to skip over the key.
1466 this.dataPos = this.keyPos + header.keyLen;
1469 public synchronized Key getKey() {
1470 if (header.keyLen > 0) {
1471 return new Key(this.data, this.keyPos, header.keyLen);
1477 public synchronized void streamTo(OutputStream os) throws IOException {
1478 if (header.dataLen > 0) {
1479 os.write(this.data, this.dataPos, header.dataLen);
1483 public synchronized void streamFrom(InputStream is) throws IOException {
1484 int avail = is.available();
1486 header.dataLen = fileHeader.workSize - header.keyLen;
1487 if (avail < header.dataLen) {
1488 header.dataLen = avail;
1490 if (header.dataLen > 0) {
1491 is.read(this.data, this.keyPos + header.keyLen, header.dataLen);
1495 // No synchronization: pageNum is final.
1496 public int compareTo(Page o) {
1497 return (int) (this.pageNum - o.pageNum);