| %line | %branch | |||||||||
|---|---|---|---|---|---|---|---|---|---|---|
| org.apache.jcs.auxiliary.disk.block.BlockDiskKeyStore$MyThreadFactory |
|
|
| 1 | package org.apache.jcs.auxiliary.disk.block; |
|
| 2 | ||
| 3 | /* |
|
| 4 | * Licensed to the Apache Software Foundation (ASF) under one |
|
| 5 | * or more contributor license agreements. See the NOTICE file |
|
| 6 | * distributed with this work for additional information |
|
| 7 | * regarding copyright ownership. The ASF licenses this file |
|
| 8 | * to you under the Apache License, Version 2.0 (the |
|
| 9 | * "License"); you may not use this file except in compliance |
|
| 10 | * with the License. You may obtain a copy of the License at |
|
| 11 | * |
|
| 12 | * http://www.apache.org/licenses/LICENSE-2.0 |
|
| 13 | * |
|
| 14 | * Unless required by applicable law or agreed to in writing, |
|
| 15 | * software distributed under the License is distributed on an |
|
| 16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
|
| 17 | * KIND, either express or implied. See the License for the |
|
| 18 | * specific language governing permissions and limitations |
|
| 19 | * under the License. |
|
| 20 | */ |
|
| 21 | ||
| 22 | import java.io.BufferedInputStream; |
|
| 23 | import java.io.BufferedOutputStream; |
|
| 24 | import java.io.EOFException; |
|
| 25 | import java.io.File; |
|
| 26 | import java.io.FileInputStream; |
|
| 27 | import java.io.FileOutputStream; |
|
| 28 | import java.io.ObjectInputStream; |
|
| 29 | import java.io.ObjectOutputStream; |
|
| 30 | import java.io.Serializable; |
|
| 31 | import java.util.HashMap; |
|
| 32 | import java.util.Iterator; |
|
| 33 | import java.util.Map; |
|
| 34 | import java.util.Set; |
|
| 35 | ||
| 36 | import org.apache.commons.logging.Log; |
|
| 37 | import org.apache.commons.logging.LogFactory; |
|
| 38 | import org.apache.jcs.auxiliary.disk.LRUMapJCS; |
|
| 39 | import org.apache.jcs.utils.timing.ElapsedTimer; |
|
| 40 | ||
| 41 | import EDU.oswego.cs.dl.util.concurrent.ClockDaemon; |
|
| 42 | import EDU.oswego.cs.dl.util.concurrent.ThreadFactory; |
|
| 43 | ||
| 44 | /** |
|
| 45 | * This is responsible for storing the keys. |
|
| 46 | * <p> |
|
| 47 | * @author Aaron Smuts |
|
| 48 | */ |
|
| 49 | public class BlockDiskKeyStore |
|
| 50 | { |
|
| 51 | /** The logger */ |
|
| 52 | private static final Log log = LogFactory.getLog( BlockDiskKeyStore.class ); |
|
| 53 | ||
| 54 | /** Attributes governing the behavior of the block disk cache. */ |
|
| 55 | private BlockDiskCacheAttributes blockDiskCacheAttributes; |
|
| 56 | ||
| 57 | /** The key to block map */ |
|
| 58 | private Map keyHash; |
|
| 59 | ||
| 60 | /** The file where we persist the keys */ |
|
| 61 | private File keyFile; |
|
| 62 | ||
| 63 | /** The name to prefix log messages with. */ |
|
| 64 | private final String logCacheName; |
|
| 65 | ||
| 66 | /** Name of the file where we persist the keys */ |
|
| 67 | private String fileName; |
|
| 68 | ||
| 69 | /** The maximum number of keys to store in memory */ |
|
| 70 | private int maxKeySize; |
|
| 71 | ||
| 72 | /** we need this so we can communicate free blocks to the data store when keys fall off the LRU */ |
|
| 73 | private BlockDiskCache blockDiskCache; |
|
| 74 | ||
| 75 | /** The root directory in which the keyFile lives */ |
|
| 76 | private File rootDirectory; |
|
| 77 | ||
| 78 | /** |
|
| 79 | * The background key persister, one for all regions. |
|
| 80 | */ |
|
| 81 | private static ClockDaemon persistenceDaemon; |
|
| 82 | ||
| 83 | /** |
|
| 84 | * Set the configuration options. |
|
| 85 | * <p> |
|
| 86 | * @param cacheAttributes |
|
| 87 | * @param blockDiskCache used for freeing |
|
| 88 | * @throws Exception |
|
| 89 | */ |
|
| 90 | public BlockDiskKeyStore( BlockDiskCacheAttributes cacheAttributes, BlockDiskCache blockDiskCache ) |
|
| 91 | throws Exception |
|
| 92 | { |
|
| 93 | this.blockDiskCacheAttributes = cacheAttributes; |
|
| 94 | this.logCacheName = "Region [" + this.blockDiskCacheAttributes.getCacheName() + "] "; |
|
| 95 | this.fileName = class="keyword">this.blockDiskCacheAttributes.getCacheName(); |
|
| 96 | this.maxKeySize = cacheAttributes.getMaxKeySize(); |
|
| 97 | this.blockDiskCache = blockDiskCache; |
|
| 98 | ||
| 99 | String rootDirName = cacheAttributes.getDiskPath(); |
|
| 100 | this.rootDirectory = new File( rootDirName ); |
|
| 101 | this.rootDirectory.mkdirs(); |
|
| 102 | ||
| 103 | if ( log.isInfoEnabled() ) |
|
| 104 | { |
|
| 105 | log.info( logCacheName + "Cache file root directory [" + rootDirName + "]" ); |
|
| 106 | } |
|
| 107 | ||
| 108 | this.keyFile = new File( rootDirectory, fileName + ".key" ); |
|
| 109 | ||
| 110 | if ( log.isInfoEnabled() ) |
|
| 111 | { |
|
| 112 | log.info( logCacheName + "Key File [" + this.keyFile.getAbsolutePath() + "]" ); |
|
| 113 | } |
|
| 114 | ||
| 115 | if ( keyFile.length() > 0 ) |
|
| 116 | { |
|
| 117 | loadKeys(); |
|
| 118 | // TODO verify somehow |
|
| 119 | } |
|
| 120 | else |
|
| 121 | { |
|
| 122 | initKeyMap(); |
|
| 123 | } |
|
| 124 | ||
| 125 | // add this region to the persistence thread. |
|
| 126 | // TODO we might need to stagger this a bit. |
|
| 127 | if ( this.blockDiskCacheAttributes.getKeyPersistenceIntervalSeconds() > 0 ) |
|
| 128 | { |
|
| 129 | if ( persistenceDaemon == null ) |
|
| 130 | { |
|
| 131 | persistenceDaemon = new ClockDaemon(); |
|
| 132 | persistenceDaemon.setThreadFactory( new MyThreadFactory() ); |
|
| 133 | } |
|
| 134 | persistenceDaemon |
|
| 135 | .executePeriodically( this.blockDiskCacheAttributes.getKeyPersistenceIntervalSeconds() * 1000, |
|
| 136 | new Runnable() |
|
| 137 | { |
|
| 138 | public void run() |
|
| 139 | { |
|
| 140 | saveKeys(); |
|
| 141 | } |
|
| 142 | }, false ); |
|
| 143 | } |
|
| 144 | } |
|
| 145 | ||
| 146 | /** |
|
| 147 | * Saves key file to disk. This gets the LRUMap entry set and write the entries out one by one |
|
| 148 | * after putting them in a wrapper. |
|
| 149 | */ |
|
| 150 | protected void saveKeys() |
|
| 151 | { |
|
| 152 | try |
|
| 153 | { |
|
| 154 | ElapsedTimer timer = new ElapsedTimer(); |
|
| 155 | int numKeys = keyHash.size(); |
|
| 156 | if ( log.isInfoEnabled() ) |
|
| 157 | { |
|
| 158 | log.info( logCacheName + "Saving keys to [" + this.keyFile.getAbsolutePath() + "], key count [" |
|
| 159 | + numKeys + "]" ); |
|
| 160 | } |
|
| 161 | ||
| 162 | keyFile.delete(); |
|
| 163 | ||
| 164 | keyFile = new File( rootDirectory, fileName + ".key" ); |
|
| 165 | FileOutputStream fos = new FileOutputStream( keyFile ); |
|
| 166 | BufferedOutputStream bos = new BufferedOutputStream( fos, 1024 ); |
|
| 167 | ObjectOutputStream oos = new ObjectOutputStream( bos ); |
|
| 168 | try |
|
| 169 | { |
|
| 170 | // don't need to synchronize, since the underlying collection makes a copy |
|
| 171 | Iterator keyIt = keyHash.entrySet().iterator(); |
|
| 172 | while ( keyIt.hasNext() ) |
|
| 173 | { |
|
| 174 | Map.Entry entry = (Map.Entry) keyIt.next(); |
|
| 175 | BlockDiskElementDescriptor descriptor = new BlockDiskElementDescriptor(); |
|
| 176 | descriptor.setKey( (Serializable) entry.getKey() ); |
|
| 177 | descriptor.setBlocks( (int[]) entry.getValue() ); |
|
| 178 | // stream these out in the loop. |
|
| 179 | oos.writeObject( descriptor ); |
|
| 180 | } |
|
| 181 | } |
|
| 182 | finally |
|
| 183 | { |
|
| 184 | oos.flush(); |
|
| 185 | oos.close(); |
|
| 186 | } |
|
| 187 | ||
| 188 | if ( log.isInfoEnabled() ) |
|
| 189 | { |
|
| 190 | log.info( logCacheName + "Finished saving keys. It took " + timer.getElapsedTimeString() + " to store " |
|
| 191 | + numKeys + " keys. Key file length [" + keyFile.length() + "]" ); |
|
| 192 | } |
|
| 193 | } |
|
| 194 | catch ( Exception e ) |
|
| 195 | { |
|
| 196 | log.error( logCacheName + "Problem storing keys.", e ); |
|
| 197 | } |
|
| 198 | } |
|
| 199 | ||
| 200 | /** |
|
| 201 | * Resets the file and creates a new key map. |
|
| 202 | */ |
|
| 203 | protected void reset() |
|
| 204 | { |
|
| 205 | File keyFileTemp = new File( this.rootDirectory, fileName + ".key" ); |
|
| 206 | keyFileTemp.delete(); |
|
| 207 | ||
| 208 | keyFile = new File( this.rootDirectory, fileName + ".key" ); |
|
| 209 | ||
| 210 | initKeyMap(); |
|
| 211 | } |
|
| 212 | ||
| 213 | /** |
|
| 214 | * This is mainly used for testing. It leave the disk in tact, and just clears memory. |
|
| 215 | */ |
|
| 216 | protected void clearMemoryMap() |
|
| 217 | { |
|
| 218 | this.keyHash.clear(); |
|
| 219 | } |
|
| 220 | ||
| 221 | /** |
|
| 222 | * Create the map for keys that contain the index position on disk. |
|
| 223 | */ |
|
| 224 | private void initKeyMap() |
|
| 225 | { |
|
| 226 | keyHash = null; |
|
| 227 | if ( maxKeySize >= 0 ) |
|
| 228 | { |
|
| 229 | keyHash = new LRUMap( maxKeySize ); |
|
| 230 | if ( log.isInfoEnabled() ) |
|
| 231 | { |
|
| 232 | log.info( logCacheName + "Set maxKeySize to: '" + maxKeySize + "'" ); |
|
| 233 | } |
|
| 234 | } |
|
| 235 | else |
|
| 236 | { |
|
| 237 | // If no max size, use a plain map for memory and processing efficiency. |
|
| 238 | keyHash = new HashMap(); |
|
| 239 | // keyHash = Collections.synchronizedMap( new HashMap() ); |
|
| 240 | if ( log.isInfoEnabled() ) |
|
| 241 | { |
|
| 242 | log.info( logCacheName + "Set maxKeySize to unlimited'" ); |
|
| 243 | } |
|
| 244 | } |
|
| 245 | } |
|
| 246 | ||
| 247 | /** |
|
| 248 | * Loads the keys from the .key file. The keys are stored individually on disk. They are added |
|
| 249 | * one by one to an LRUMap.. |
|
| 250 | * <p> |
|
| 251 | * @throws InterruptedException |
|
| 252 | */ |
|
| 253 | protected void loadKeys() |
|
| 254 | throws InterruptedException |
|
| 255 | { |
|
| 256 | if ( log.isInfoEnabled() ) |
|
| 257 | { |
|
| 258 | log.info( logCacheName + "Loading keys for " + keyFile.toString() ); |
|
| 259 | } |
|
| 260 | ||
| 261 | try |
|
| 262 | { |
|
| 263 | // create a key map to use. |
|
| 264 | initKeyMap(); |
|
| 265 | ||
| 266 | HashMap keys = new HashMap(); |
|
| 267 | ||
| 268 | FileInputStream fis = new FileInputStream( keyFile ); |
|
| 269 | BufferedInputStream bis = new BufferedInputStream( fis ); |
|
| 270 | ObjectInputStream ois = new ObjectInputStream( bis ); |
|
| 271 | try |
|
| 272 | { |
|
| 273 | while ( true ) |
|
| 274 | { |
|
| 275 | BlockDiskElementDescriptor descriptor = (BlockDiskElementDescriptor) ois.readObject(); |
|
| 276 | if ( descriptor != null ) |
|
| 277 | { |
|
| 278 | keys.put( descriptor.getKey(), descriptor.getBlocks() ); |
|
| 279 | } |
|
| 280 | } |
|
| 281 | } |
|
| 282 | catch ( EOFException eof ) |
|
| 283 | { |
|
| 284 | // nothing |
|
| 285 | } |
|
| 286 | finally |
|
| 287 | { |
|
| 288 | ois.close(); |
|
| 289 | } |
|
| 290 | ||
| 291 | if ( !keys.isEmpty() ) |
|
| 292 | { |
|
| 293 | if ( log.isDebugEnabled() ) |
|
| 294 | { |
|
| 295 | log.debug( logCacheName + "Found " + keys.size() + " in keys file." ); |
|
| 296 | } |
|
| 297 | ||
| 298 | keyHash.putAll( keys ); |
|
| 299 | ||
| 300 | if ( log.isInfoEnabled() ) |
|
| 301 | { |
|
| 302 | log.info( logCacheName + "Loaded keys from [" + fileName + "], key count: " + keyHash.size() |
|
| 303 | + "; up to " + maxKeySize + " will be available." ); |
|
| 304 | } |
|
| 305 | } |
|
| 306 | } |
|
| 307 | catch ( Exception e ) |
|
| 308 | { |
|
| 309 | log.error( logCacheName + "Problem loading keys for file " + fileName, e ); |
|
| 310 | } |
|
| 311 | } |
|
| 312 | ||
| 313 | /** |
|
| 314 | * Gets the entry set. |
|
| 315 | * <p> |
|
| 316 | * @return entry set. |
|
| 317 | */ |
|
| 318 | public Set entrySet() |
|
| 319 | { |
|
| 320 | return this.keyHash.entrySet(); |
|
| 321 | } |
|
| 322 | ||
| 323 | /** |
|
| 324 | * Gets the key set. |
|
| 325 | * <p> |
|
| 326 | * @return key set. |
|
| 327 | */ |
|
| 328 | public Set keySet() |
|
| 329 | { |
|
| 330 | return this.keyHash.keySet(); |
|
| 331 | } |
|
| 332 | ||
| 333 | /** |
|
| 334 | * Gets the size of the key hash. |
|
| 335 | * <p> |
|
| 336 | * @return the number of keys. |
|
| 337 | */ |
|
| 338 | public int size() |
|
| 339 | { |
|
| 340 | return this.keyHash.size(); |
|
| 341 | } |
|
| 342 | ||
| 343 | /** |
|
| 344 | * gets the object for the key. |
|
| 345 | * <p> |
|
| 346 | * @param key |
|
| 347 | * @return Object |
|
| 348 | */ |
|
| 349 | public int[] get( Object key ) |
|
| 350 | { |
|
| 351 | return (int[]) this.keyHash.get( key ); |
|
| 352 | } |
|
| 353 | ||
| 354 | /** |
|
| 355 | * Puts a int[] in the keyStore. |
|
| 356 | * <p> |
|
| 357 | * @param key |
|
| 358 | * @param value |
|
| 359 | */ |
|
| 360 | public void put( Object key, int[] value ) |
|
| 361 | { |
|
| 362 | this.keyHash.put( key, value ); |
|
| 363 | } |
|
| 364 | ||
| 365 | /** |
|
| 366 | * Remove by key. |
|
| 367 | * <p> |
|
| 368 | * @param key |
|
| 369 | * @return BlockDiskElementDescriptor if it was present, else null |
|
| 370 | */ |
|
| 371 | public int[] remove( Object key ) |
|
| 372 | { |
|
| 373 | return (int[]) this.keyHash.remove( key ); |
|
| 374 | } |
|
| 375 | ||
| 376 | /** |
|
| 377 | * Class for recylcing and lru. This implments the LRU overflow callback, so we can mark the |
|
| 378 | * blocks as free. |
|
| 379 | */ |
|
| 380 | public class LRUMap |
|
| 381 | extends LRUMapJCS |
|
| 382 | { |
|
| 383 | /** Don't change */ |
|
| 384 | private static final long serialVersionUID = 4955079991472142198L; |
|
| 385 | ||
| 386 | /** |
|
| 387 | * <code>tag</code> tells us which map we are working on. |
|
| 388 | */ |
|
| 389 | public String tag = "orig"; |
|
| 390 | ||
| 391 | /** |
|
| 392 | * Default |
|
| 393 | */ |
|
| 394 | public LRUMap() |
|
| 395 | { |
|
| 396 | super(); |
|
| 397 | } |
|
| 398 | ||
| 399 | /** |
|
| 400 | * @param maxKeySize |
|
| 401 | */ |
|
| 402 | public LRUMap( int maxKeySize ) |
|
| 403 | { |
|
| 404 | super( maxKeySize ); |
|
| 405 | } |
|
| 406 | ||
| 407 | /** |
|
| 408 | * This is called when the may key size is reaced. The least recently used item will be |
|
| 409 | * passed here. We will store the position and size of the spot on disk in the recycle bin. |
|
| 410 | * <p> |
|
| 411 | * @param key |
|
| 412 | * @param value |
|
| 413 | */ |
|
| 414 | protected void processRemovedLRU( Object key, Object value ) |
|
| 415 | { |
|
| 416 | blockDiskCache.freeBlocks( (int[]) value ); |
|
| 417 | if ( log.isDebugEnabled() ) |
|
| 418 | { |
|
| 419 | log.debug( logCacheName + "Removing key: [" + key + "] from key store." ); |
|
| 420 | log.debug( logCacheName + "Key store size: [" + this.size() + "]." ); |
|
| 421 | } |
|
| 422 | } |
|
| 423 | } |
|
| 424 | ||
| 425 | /** |
|
| 426 | * Allows us to set the daemon status on the clockdaemon |
|
| 427 | * @author aaronsm |
|
| 428 | */ |
|
| 429 | 24 | class MyThreadFactory |
| 430 | implements ThreadFactory |
|
| 431 | { |
|
| 432 | ||
| 433 | /** |
|
| 434 | * Ensures that we create daemon threads. |
|
| 435 | * <p> |
|
| 436 | * (non-Javadoc) |
|
| 437 | * @see EDU.oswego.cs.dl.util.concurrent.ThreadFactory#newThread(java.lang.Runnable) |
|
| 438 | */ |
|
| 439 | public Thread newThread( Runnable runner ) |
|
| 440 | { |
|
| 441 | 24 | Thread t = new Thread( runner ); |
| 442 | 24 | t.setDaemon( true ); |
| 443 | 24 | t.setPriority( Thread.MIN_PRIORITY ); |
| 444 | 24 | return t; |
| 445 | } |
|
| 446 | } |
|
| 447 | } |
This report was generated by jcoverage and the Maven JCoverage Plugin.