DfsObjDatabase.java

/*
 * Copyright (C) 2011, Google Inc. and others
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Distribution License v. 1.0 which is available at
 * https://www.eclipse.org/org/documents/edl-v10.php.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

package org.eclipse.jgit.internal.storage.dfs;

import static java.util.stream.Collectors.joining;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;

import org.eclipse.jgit.internal.storage.pack.PackExt;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.ObjectDatabase;
import org.eclipse.jgit.lib.ObjectInserter;
import org.eclipse.jgit.lib.ObjectReader;

/**
 * Manages objects stored in
 * {@link org.eclipse.jgit.internal.storage.dfs.DfsPackFile} on a storage
 * system.
 */
public abstract class DfsObjDatabase extends ObjectDatabase {
    private static final PackList NO_PACKS = new PackList(
            new DfsPackFile[0],
            new DfsReftable[0]) {
        @Override
        boolean dirty() {
            return true;
        }

        @Override
        void clearDirty() {
            // Always dirty.
        }

        @Override
        public void markDirty() {
            // Always dirty.
        }
    };

    /**
     * Sources for a pack file.
     * <p>
     * <strong>Note:</strong> When sorting packs by source, do not use the default
     * comparator based on {@link Enum#compareTo}. Prefer {@link
     * #DEFAULT_COMPARATOR} or your own {@link ComparatorBuilder}.
     */
    public enum PackSource {
        /** The pack is created by ObjectInserter due to local activity. */
        INSERT,

        /**
         * The pack is created by PackParser due to a network event.
         * <p>
         * A received pack can be from either a push into the repository, or a
         * fetch into the repository; the direction doesn't matter. A received
         * pack was built by the remote Git implementation and may not match the
         * storage layout preferred by this version. Received packs are likely
         * to be either compacted or garbage collected in the future.
         */
        RECEIVE,

        /**
         * The pack was created by compacting multiple packs together.
         * <p>
         * Packs created by compacting multiple packs together aren't nearly as
         * efficient as a fully garbage collected repository, but may save disk
         * space by reducing redundant copies of base objects.
         *
         * @see DfsPackCompactor
         */
        COMPACT,

        /**
         * Pack was created by Git garbage collection by this implementation.
         * <p>
         * This source is only used by the {@link DfsGarbageCollector} when it
         * builds a pack file by traversing the object graph and copying all
         * reachable objects into a new pack stream.
         *
         * @see DfsGarbageCollector
         */
        GC,

        /** Created from non-heads by {@link DfsGarbageCollector}. */
        GC_REST,

        /**
         * Pack was created by Git garbage collection.
         * <p>
         * This pack contains only unreachable garbage that was found during the
         * last GC pass. It is retained in a new pack until it is safe to prune
         * these objects from the repository.
         */
        UNREACHABLE_GARBAGE;

        /**
         * Default comparator for sources.
         * <p>
         * Sorts generally newer, smaller types such as {@code INSERT} and {@code
         * RECEIVE} earlier; older, larger types such as {@code GC} later; and
         * {@code UNREACHABLE_GARBAGE} at the end.
         */
        public static final Comparator<PackSource> DEFAULT_COMPARATOR =
                new ComparatorBuilder()
                        .add(INSERT, RECEIVE)
                        .add(COMPACT)
                        .add(GC)
                        .add(GC_REST)
                        .add(UNREACHABLE_GARBAGE)
                        .build();

        /**
         * Builder for describing {@link PackSource} ordering where some values are
         * explicitly considered equal to others.
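         * <p>
         * For example, to build an ordering that treats {@code GC} and
         * {@code GC_REST} as equal (an illustrative ordering, not the
         * default):
         *
         * <pre>{@code
         * Comparator<PackSource> cmp = new PackSource.ComparatorBuilder()
         *         .add(PackSource.INSERT, PackSource.RECEIVE)
         *         .add(PackSource.COMPACT)
         *         .add(PackSource.GC, PackSource.GC_REST)
         *         .add(PackSource.UNREACHABLE_GARBAGE)
         *         .build();
         * }</pre>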
         */
        public static class ComparatorBuilder {
            private final Map<PackSource, Integer> ranks = new HashMap<>();
            private int counter;

            /**
             * Add a collection of sources that should sort as equal.
             * <p>
             * Sources in the input will sort after sources listed in previous calls
             * to this method.
             *
             * @param sources
             *            sources in this equivalence class.
             * @return this.
             */
            public ComparatorBuilder add(PackSource... sources) {
                for (PackSource s : sources) {
                    ranks.put(s, Integer.valueOf(counter));
                }
                counter++;
                return this;
            }

            /**
             * Build the comparator.
             *
             * @return new comparator instance.
             * @throws IllegalArgumentException
             *             not all {@link PackSource} instances were explicitly assigned
             *             an equivalence class.
             */
            public Comparator<PackSource> build() {
                return new PackSourceComparator(ranks);
            }
        }

        private static class PackSourceComparator implements Comparator<PackSource> {
            private final Map<PackSource, Integer> ranks;

            private PackSourceComparator(Map<PackSource, Integer> ranks) {
                if (!ranks.keySet().equals(
                            new HashSet<>(Arrays.asList(PackSource.values())))) {
                    throw new IllegalArgumentException();
                }
                this.ranks = new HashMap<>(ranks);
            }

            @Override
            public int compare(PackSource a, PackSource b) {
                return ranks.get(a).compareTo(ranks.get(b));
            }

            @Override
            public String toString() {
                return Arrays.stream(PackSource.values())
                        .map(s -> s + "=" + ranks.get(s)) //$NON-NLS-1$
                        .collect(joining(", ", getClass().getSimpleName() + "{", "}")); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
            }
        }
    }

    private final AtomicReference<PackList> packList;

    private final DfsRepository repository;

    private DfsReaderOptions readerOptions;

    private Comparator<DfsPackDescription> packComparator;

    /**
     * Initialize an object database for our repository.
     *
     * @param repository
     *            repository owning this object database.
     * @param options
     *            how readers should access the object database.
     */
    protected DfsObjDatabase(DfsRepository repository,
            DfsReaderOptions options) {
        this.repository = repository;
        this.packList = new AtomicReference<>(NO_PACKS);
        this.readerOptions = options;
        this.packComparator = DfsPackDescription.objectLookupComparator();
    }

    /**
     * Get configured reader options, such as read-ahead.
     *
     * @return configured reader options, such as read-ahead.
     */
    public DfsReaderOptions getReaderOptions() {
        return readerOptions;
    }

    /**
     * Set the comparator used when searching for objects across packs.
     * <p>
     * An optimal comparator will find more objects without having to load large
     * idx files from storage only to find that they don't contain the object.
     * See {@link DfsPackDescription#objectLookupComparator()} for the default
     * heuristics.
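     * <p>
     * An illustrative sketch, assuming a
     * {@code DfsPackDescription.objectLookupComparator(Comparator)} overload
     * that accepts a custom {@link PackSource} ordering:
     *
     * <pre>{@code
     * db.setPackComparator(DfsPackDescription.objectLookupComparator(
     *         new PackSource.ComparatorBuilder()
     *                 .add(PackSource.GC)
     *                 .add(PackSource.COMPACT)
     *                 .add(PackSource.INSERT, PackSource.RECEIVE)
     *                 .add(PackSource.GC_REST)
     *                 .add(PackSource.UNREACHABLE_GARBAGE)
     *                 .build()));
     * }</pre>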
     *
     * @param packComparator
     *            comparator.
     */
    public void setPackComparator(Comparator<DfsPackDescription> packComparator) {
        this.packComparator = packComparator;
    }

    /** {@inheritDoc} */
    @Override
    public DfsReader newReader() {
        return new DfsReader(this);
    }

    /** {@inheritDoc} */
    @Override
    public ObjectInserter newInserter() {
        return new DfsInserter(this);
    }

    /**
     * Scan and list all available pack files in the repository.
     *
     * @return list of available packs. The returned array is shared with the
     *         implementation and must not be modified by the caller.
     * @throws java.io.IOException
     *             the pack list cannot be initialized.
     */
    public DfsPackFile[] getPacks() throws IOException {
        return getPackList().packs;
    }

    /**
     * Scan and list all available reftable files in the repository.
     *
     * @return list of available reftables. The returned array is shared with
     *         the implementation and must not be modified by the caller.
     * @throws java.io.IOException
     *             the pack list cannot be initialized.
     */
    public DfsReftable[] getReftables() throws IOException {
        return getPackList().reftables;
    }

    /**
     * Scan and list all available pack files in the repository.
     *
     * @return list of available packs, with some additional metadata. The
     *         returned array is shared with the implementation and must not be
     *         modified by the caller.
     * @throws java.io.IOException
     *             the pack list cannot be initialized.
     */
    public PackList getPackList() throws IOException {
        return scanPacks(NO_PACKS);
    }

    /**
     * Get repository owning this object database.
     *
     * @return repository owning this object database.
     */
    protected DfsRepository getRepository() {
        return repository;
    }

    /**
     * List currently known pack files in the repository, without scanning.
     *
     * @return list of available packs. The returned array is shared with the
     *         implementation and must not be modified by the caller.
     */
    public DfsPackFile[] getCurrentPacks() {
        return getCurrentPackList().packs;
    }

    /**
     * List currently known reftable files in the repository, without scanning.
     *
     * @return list of available reftables. The returned array is shared with
     *         the implementation and must not be modified by the caller.
     */
    public DfsReftable[] getCurrentReftables() {
        return getCurrentPackList().reftables;
    }

    /**
     * List currently known pack files in the repository, without scanning.
     *
     * @return list of available packs, with some additional metadata. The
     *         returned array is shared with the implementation and must not be
     *         modified by the caller.
     */
    public PackList getCurrentPackList() {
        return packList.get();
    }

    /**
     * Does the requested object exist in this database?
     * <p>
     * This differs from ObjectDatabase's implementation in that we can selectively
     * ignore unreachable (garbage) objects.
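     * <p>
     * For example (illustrative only; {@code commitId} is any object id the
     * caller holds):
     *
     * <pre>{@code
     * if (db.has(commitId, true)) {
     *     // object exists and was not classified as unreachable garbage
     * }
     * }</pre>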
     *
     * @param objectId
     *            identity of the object to test for existence of.
     * @param avoidUnreachableObjects
     *            if true, ignore objects that are unreachable.
     * @return true if the specified object is stored in this database.
     * @throws java.io.IOException
     *             the object store cannot be accessed.
     */
    public boolean has(AnyObjectId objectId, boolean avoidUnreachableObjects)
            throws IOException {
        try (ObjectReader or = newReader()) {
            or.setAvoidUnreachableObjects(avoidUnreachableObjects);
            return or.has(objectId);
        }
    }

    /**
     * Generate a new unique name for a pack file.
     *
     * @param source
     *            where the pack stream is created.
     * @return a unique name for the pack file. Must not collide with any other
     *         pack file name in the same DFS.
     * @throws java.io.IOException
     *             a new unique pack description cannot be generated.
     */
    protected abstract DfsPackDescription newPack(PackSource source)
            throws IOException;

    /**
     * Generate a new unique name for a pack file.
     * <p>
     * The default implementation of this method is equivalent to
     * {@code newPack(source).setEstimatedPackSize(estimatedPackSize)}, but
     * clients can override this method to use the given
     * {@code estimatedPackSize} value more efficiently when creating a new
     * {@link org.eclipse.jgit.internal.storage.dfs.DfsPackDescription} object.
     *
     * @param source
     *            where the pack stream is created.
     * @param estimatedPackSize
     *            the estimated size of the pack.
     * @return a unique name for the pack file. Must not collide with any other
     *         pack file name in the same DFS.
     * @throws java.io.IOException
     *             a new unique pack description cannot be generated.
     */
    protected DfsPackDescription newPack(PackSource source,
            long estimatedPackSize) throws IOException {
        DfsPackDescription pack = newPack(source);
        pack.setEstimatedPackSize(estimatedPackSize);
        return pack;
    }

    /**
     * Commit a pack and index pair that was written to the DFS.
     * <p>
     * Committing the pack/index pair makes them visible to readers. The JGit
     * DFS code always writes the pack, then the index. This allows a simple
     * commit process to do nothing if readers always look for both files to
     * exist and the DFS performs atomic creation of the file (e.g. stream to a
     * temporary file and rename to target on close).
     * <p>
     * During pack compaction or GC the new pack file may be replacing other
     * older files. Implementations should remove those older files (if any) as
     * part of the commit of the new file.
     * <p>
     * This method is a trivial wrapper around
     * {@link #commitPackImpl(Collection, Collection)} that calls the
     * implementation and fires events.
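     * <p>
     * A typical caller-side sequence, shown as an illustrative sketch
     * ({@code writePackAndIndex} is a hypothetical helper built on
     * {@link #writeFile(DfsPackDescription, PackExt)}):
     *
     * <pre>{@code
     * DfsPackDescription desc = newPack(PackSource.INSERT);
     * try {
     *     writePackAndIndex(desc); // hypothetical
     *     commitPack(Collections.singletonList(desc), null);
     * } catch (IOException err) {
     *     rollbackPack(Collections.singletonList(desc));
     *     throw err;
     * }
     * }</pre>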
     *
     * @param desc
     *            description of the new packs.
     * @param replaces
     *            if not null, list of packs to remove.
     * @throws java.io.IOException
     *             the packs cannot be committed. On failure a rollback must
     *             also be attempted by the caller.
     */
    protected void commitPack(Collection<DfsPackDescription> desc,
            Collection<DfsPackDescription> replaces) throws IOException {
        commitPackImpl(desc, replaces);
        getRepository().fireEvent(new DfsPacksChangedEvent());
    }

    /**
     * Implementation of pack commit.
     *
     * @see #commitPack(Collection, Collection)
     * @param desc
     *            description of the new packs.
     * @param replaces
     *            if not null, list of packs to remove.
     * @throws java.io.IOException
     *             the packs cannot be committed.
     */
    protected abstract void commitPackImpl(Collection<DfsPackDescription> desc,
            Collection<DfsPackDescription> replaces) throws IOException;

    /**
     * Try to rollback a pack creation.
     * <p>
     * JGit DFS always writes the pack first, then the index. If the pack does
     * not yet exist, then neither does the index. A safe DFS implementation
     * would try to remove both files to ensure they are really gone.
     * <p>
     * A rollback does not support failures, as it only occurs when there is
     * already a failure in progress. A DFS implementor may wish to log
     * warnings/error messages when a rollback fails, but should not send new
     * exceptions up the Java callstack.
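     * <p>
     * An illustrative sketch under those constraints ({@code deleteQuietly}
     * is a hypothetical helper that logs and swallows storage errors):
     *
     * <pre>{@code
     * protected void rollbackPack(Collection<DfsPackDescription> desc) {
     *     for (DfsPackDescription d : desc) {
     *         for (PackExt ext : PackExt.values()) {
     *             if (d.hasFileExt(ext)) {
     *                 deleteQuietly(d, ext); // log failures, never throw
     *             }
     *         }
     *     }
     * }
     * }</pre>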
     *
     * @param desc
     *            pack to delete.
     */
    protected abstract void rollbackPack(Collection<DfsPackDescription> desc);

    /**
     * List the available pack files.
     * <p>
     * The returned list must support random access and must be mutable by the
     * caller. It is sorted in place using the natural sorting of the returned
     * DfsPackDescription objects.
     *
     * @return available packs. May be empty if there are no packs.
     * @throws java.io.IOException
     *             the packs cannot be listed and the object database is not
     *             functional to the caller.
     */
    protected abstract List<DfsPackDescription> listPacks() throws IOException;

    /**
     * Open a pack, pack index, or other related file for reading.
     *
     * @param desc
     *            description of pack related to the data that will be read.
     *            This is an instance previously obtained from
     *            {@link #listPacks()}, but not necessarily from the same
     *            DfsObjDatabase instance.
     * @param ext
     *            file extension that will be read, e.g. "pack" or "idx".
     * @return channel to read the file.
     * @throws java.io.FileNotFoundException
     *             the file does not exist.
     * @throws java.io.IOException
     *             the file cannot be opened.
     */
    protected abstract ReadableChannel openFile(
            DfsPackDescription desc, PackExt ext)
            throws FileNotFoundException, IOException;

    /**
     * Open a pack, pack index, or other related file for writing.
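     * <p>
     * A usage sketch from the writer's side ({@code packBytes} is a
     * hypothetical buffer holding the encoded pack stream):
     *
     * <pre>{@code
     * try (DfsOutputStream out = writeFile(desc, PackExt.PACK)) {
     *     out.write(packBytes);
     * }
     * }</pre>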
     *
     * @param desc
     *            description of pack related to the data that will be written.
     *            This is an instance previously obtained from
     *            {@link #newPack(PackSource)}.
     * @param ext
     *            file extension that will be written, e.g. "pack" or "idx".
     * @return channel to write the file.
     * @throws java.io.IOException
     *             the file cannot be opened.
     */
    protected abstract DfsOutputStream writeFile(
            DfsPackDescription desc, PackExt ext) throws IOException;

    void addPack(DfsPackFile newPack) throws IOException {
        PackList o, n;
        do {
            o = packList.get();
            if (o == NO_PACKS) {
                // The repository may not have needed any existing objects to
                // complete the current task of creating a pack (e.g. push of a
                // pack with no external deltas). Because we don't scan for
                // newly added packs on missed object lookups, scan now to
                // make sure all older packs are available in the packList.
                o = scanPacks(o);

                // It's possible the scan identified the pack we were asked to
                // add, as the pack was already committed via commitPack().
                // If this is the case return without changing the list.
                for (DfsPackFile p : o.packs) {
                    if (p.key.equals(newPack.key)) {
                        return;
                    }
                }
            }

            DfsPackFile[] packs = new DfsPackFile[1 + o.packs.length];
            packs[0] = newPack;
            System.arraycopy(o.packs, 0, packs, 1, o.packs.length);
            n = new PackListImpl(packs, o.reftables);
        } while (!packList.compareAndSet(o, n));
    }

    void addReftable(DfsPackDescription add, Set<DfsPackDescription> remove)
            throws IOException {
        PackList o, n;
        do {
            o = packList.get();
            if (o == NO_PACKS) {
                o = scanPacks(o);
                for (DfsReftable t : o.reftables) {
                    if (t.getPackDescription().equals(add)) {
                        return;
                    }
                }
            }

            List<DfsReftable> tables = new ArrayList<>(1 + o.reftables.length);
            for (DfsReftable t : o.reftables) {
                if (!remove.contains(t.getPackDescription())) {
                    tables.add(t);
                }
            }
            tables.add(new DfsReftable(add));
            n = new PackListImpl(o.packs, tables.toArray(new DfsReftable[0]));
        } while (!packList.compareAndSet(o, n));
    }

    PackList scanPacks(PackList original) throws IOException {
        PackList o, n;
        synchronized (packList) {
            do {
                o = packList.get();
                if (o != original) {
                    // Another thread did the scan for us, while we
                    // were blocked on the monitor above.
                    //
                    return o;
                }
                n = scanPacksImpl(o);
                if (n == o)
                    return n;
            } while (!packList.compareAndSet(o, n));
        }
        getRepository().fireEvent(new DfsPacksChangedEvent());
        return n;
    }

    private PackList scanPacksImpl(PackList old) throws IOException {
        DfsBlockCache cache = DfsBlockCache.getInstance();
        Map<DfsPackDescription, DfsPackFile> packs = packMap(old);
        Map<DfsPackDescription, DfsReftable> reftables = reftableMap(old);

        List<DfsPackDescription> scanned = listPacks();
        Collections.sort(scanned, packComparator);

        List<DfsPackFile> newPacks = new ArrayList<>(scanned.size());
        List<DfsReftable> newReftables = new ArrayList<>(scanned.size());
        boolean foundNew = false;
        for (DfsPackDescription dsc : scanned) {
            DfsPackFile oldPack = packs.remove(dsc);
            if (oldPack != null) {
                newPacks.add(oldPack);
            } else if (dsc.hasFileExt(PackExt.PACK)) {
                newPacks.add(new DfsPackFile(cache, dsc));
                foundNew = true;
            }

            DfsReftable oldReftable = reftables.remove(dsc);
            if (oldReftable != null) {
                newReftables.add(oldReftable);
            } else if (dsc.hasFileExt(PackExt.REFTABLE)) {
                newReftables.add(new DfsReftable(cache, dsc));
                foundNew = true;
            }
        }

        if (newPacks.isEmpty() && newReftables.isEmpty())
            return new PackListImpl(NO_PACKS.packs, NO_PACKS.reftables);
        if (!foundNew) {
            old.clearDirty();
            return old;
        }
        Collections.sort(newReftables, reftableComparator());
        return new PackListImpl(
                newPacks.toArray(new DfsPackFile[0]),
                newReftables.toArray(new DfsReftable[0]));
    }

    private static Map<DfsPackDescription, DfsPackFile> packMap(PackList old) {
        Map<DfsPackDescription, DfsPackFile> forReuse = new HashMap<>();
        for (DfsPackFile p : old.packs) {
            if (!p.invalid()) {
                forReuse.put(p.desc, p);
            }
        }
        return forReuse;
    }

    private static Map<DfsPackDescription, DfsReftable> reftableMap(PackList old) {
        Map<DfsPackDescription, DfsReftable> forReuse = new HashMap<>();
        for (DfsReftable p : old.reftables) {
            if (!p.invalid()) {
                forReuse.put(p.desc, p);
            }
        }
        return forReuse;
    }

    /**
     * Get comparator to sort {@link DfsReftable} by priority.
     *
     * @return comparator to sort {@link DfsReftable} by priority.
     */
    protected Comparator<DfsReftable> reftableComparator() {
        return Comparator.comparing(
                DfsReftable::getPackDescription,
                DfsPackDescription.reftableComparator());
    }

    /**
     * Clears the cached list of packs, forcing them to be scanned again.
     */
    protected void clearCache() {
        packList.set(NO_PACKS);
    }

    /** {@inheritDoc} */
    @Override
    public void close() {
        packList.set(NO_PACKS);
    }

    /** Snapshot of packs scanned in a single pass. */
    public abstract static class PackList {
        /** All known packs, sorted. */
        public final DfsPackFile[] packs;

        /** All known reftables, sorted. */
        public final DfsReftable[] reftables;

        private long lastModified = -1;

        PackList(DfsPackFile[] packs, DfsReftable[] reftables) {
            this.packs = packs;
            this.reftables = reftables;
        }

        /** @return last modified time of all packs, in milliseconds. */
        public long getLastModified() {
            if (lastModified < 0) {
                long max = 0;
                for (DfsPackFile pack : packs) {
                    max = Math.max(max, pack.getPackDescription().getLastModified());
                }
                lastModified = max;
            }
            return lastModified;
        }

        abstract boolean dirty();
        abstract void clearDirty();

        /**
         * Mark pack list as dirty.
         * <p>
         * Used when the caller knows that new data might have been written to the
         * repository that could invalidate open readers depending on this pack list,
         * for example if refs are newly scanned.
         */
        public abstract void markDirty();
    }

    private static final class PackListImpl extends PackList {
        private volatile boolean dirty;

        PackListImpl(DfsPackFile[] packs, DfsReftable[] reftables) {
            super(packs, reftables);
        }

        @Override
        boolean dirty() {
            return dirty;
        }

        @Override
        void clearDirty() {
            dirty = false;
        }

        @Override
        public void markDirty() {
            dirty = true;
        }
    }
}