arc.c revision 244781f10dcd82684fd8163c016540667842f203
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2011, 2014 by Delphix. All rights reserved.
* Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
*/
/*
* DVA-based Adjustable Replacement Cache
*
* While much of the theory of operation used here is
* based on the self-tuning, low overhead replacement cache
* presented by Megiddo and Modha at FAST 2003, there are some
* significant differences:
*
* 1. The Megiddo and Modha model assumes any page is evictable.
* Pages in its cache cannot be "locked" into memory. This makes
* the eviction algorithm simple: evict the last page in the list.
* This also makes the performance characteristics easy to reason
* about. Our cache is not so simple. At any given moment, some
* subset of the blocks in the cache are un-evictable because we
* have handed out a reference to them. Blocks are only evictable
* when there are no external references active. This makes
* eviction far more problematic: we choose to evict the evictable
* blocks that are the "lowest" in the list.
*
* There are times when it is not possible to evict the requested
* space. In these circumstances we are unable to adjust the cache
* size. To prevent the cache growing unbounded at these times we
* implement a "cache throttle" that slows the flow of new data
* into the cache until we can make space available.
*
* 2. The Megiddo and Modha model assumes a fixed cache size.
* Pages are evicted when the cache is full and there is a cache
* miss. Our model has a variable sized cache. It grows with
* high use, but also tries to react to memory pressure from the
* operating system: decreasing its size when system memory is
* tight.
*
* 3. The Megiddo and Modha model assumes a fixed page size. All
* elements of the cache are therefore exactly the same size. So
* when adjusting the cache size following a cache miss, it's simply
* a matter of choosing a single page to evict. In our model, we
* have variable sized cache blocks (ranging from 512 bytes to
* 128K bytes). We therefore choose a set of blocks to evict to make
* space for a cache miss that approximates as closely as possible
* the space used by the new block.
*
* See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
* by N. Megiddo & D. Modha, FAST 2003
*/
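/*
 * Illustrative sketch of difference (3) above; this is not code used by
 * the ARC, and next_evictable_block()/evict_block() are hypothetical
 * helpers. A miss is satisfied by evicting a *set* of blocks:
 *
 *	uint64_t evicted = 0;
 *	while (evicted < new_block_size) {
 *		blk = next_evictable_block();
 *		if (blk == NULL)
 *			break;
 *		evicted += evict_block(blk);
 *	}
 *
 * i.e. we keep evicting the "lowest" evictable blocks until their
 * cumulative size covers the space needed by the new block.
 */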
/*
* The locking model:
*
* A new reference to a cache buffer can be obtained in two
* ways: 1) via a hash table lookup using the DVA as a key,
* or 2) via one of the ARC lists. The arc_read() interface
* uses method 1, while the internal arc algorithms for
* adjusting the cache use method 2. We therefore provide two
* types of locks: 1) the hash table lock array, and 2) the
* arc list locks.
*
* Buffers do not have their own mutexes, rather they rely on the
* hash table mutexes for the bulk of their protection (i.e. most
* fields in the arc_buf_hdr_t are protected by these mutexes).
*
* buf_hash_find() returns the appropriate mutex (held) when it
* locates the requested buffer in the hash table. It returns
* NULL for the mutex if the buffer was not in the table.
*
* buf_hash_remove() expects the appropriate hash mutex to be
* already held before it is invoked.
*
* Each arc state also has a mutex which is used to protect the
* buffer list associated with the state. When attempting to
* obtain a hash table lock while holding an arc list lock you
* must use: mutex_tryenter() to avoid deadlock. Also note that
* the active state mutex must be held before the ghost state mutex.
*
* Arc buffers may have an associated eviction callback function.
* This function will be invoked prior to removing the buffer (e.g.
* in arc_do_user_evicts()). Note however that the data associated
* with the buffer may be evicted prior to the callback. The callback
* must be made with *no locks held* (to prevent deadlock). Additionally,
* the users of callbacks must ensure that their private data is
* protected from simultaneous callbacks from arc_clear_callback()
* and arc_do_user_evicts().
*
* Note that the majority of the performance stats are manipulated
* with atomic operations.
*
* The L2ARC uses the l2ad_mtx on each vdev for the following:
*
* - L2ARC buflist creation
* - L2ARC buflist eviction
* - L2ARC write completion, which walks L2ARC buflists
* - ARC header destruction, as it removes from L2ARC buflists
* - ARC header release, as it removes from L2ARC buflists
*/
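/*
 * The canonical pattern for method 1 above, as a sketch (illustrative
 * only; see buf_hash_find() and its callers for the real code):
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(guid, bp, &hash_lock);
 *	if (hdr != NULL) {
 *		... most arc_buf_hdr_t fields may be inspected here ...
 *		mutex_exit(hash_lock);
 *	}
 *
 * When an arc list lock is already held, the hash lock must instead be
 * taken with mutex_tryenter() to avoid the lock-order deadlock noted
 * above.
 */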
#include <sys/zio_compress.h>
#include <sys/zfs_context.h>
#include <sys/refcount.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#ifdef _KERNEL
#endif
#include <zfs_fletcher.h>
#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
int arc_procfd;
#endif
static kmutex_t arc_reclaim_lock;
static kcondvar_t arc_reclaim_thread_cv;
static boolean_t arc_reclaim_thread_exit;
static kcondvar_t arc_reclaim_waiters_cv;
static kmutex_t arc_user_evicts_lock;
static kcondvar_t arc_user_evicts_cv;
static boolean_t arc_user_evicts_thread_exit;
/*
* The number of headers to evict in arc_evict_state_impl() before
* dropping the sublist lock and evicting from another sublist. A lower
* value means we're more likely to evict the "correct" header (i.e. the
* oldest header in the arc state), but comes with higher overhead
* (i.e. more invocations of arc_evict_state_impl()).
*/
int zfs_arc_evict_batch_limit = 10;
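/*
 * Sketch of the batching this limit controls (illustrative; the real
 * loop lives in arc_evict_state_impl(), and pick_tail_header() is a
 * hypothetical placeholder):
 *
 *	int evict_count = 0;
 *	while ((hdr = pick_tail_header(sublist)) != NULL) {
 *		bytes_evicted += arc_evict_hdr(hdr, hash_lock);
 *		if (++evict_count >= zfs_arc_evict_batch_limit)
 *			break;
 *	}
 *	... drop the sublist lock, continue with another sublist ...
 */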
/*
* The number of sublists used for each of the arc state lists. If this
* is not set to a suitable value by the user, it will be configured to
* the number of CPUs on the system in arc_init().
*/
int zfs_arc_num_sublists_per_state = 0;
/* number of seconds before growing cache again */
static int arc_grow_retry = 60;
/* shift of arc_c for calculating overflow limit in arc_get_data_buf */
int zfs_arc_overflow_shift = 8;
/* shift of arc_c for calculating both min and max arc_p */
static int arc_p_min_shift = 4;
/* log2(fraction of arc to reclaim) */
static int arc_shrink_shift = 7;
/*
* log2(fraction of ARC which must be free to allow growing).
* I.e. If there is less than arc_c >> arc_no_grow_shift free memory,
* when reading a new block into the ARC, we will evict an equal-sized block
* from the ARC.
*
* This must be less than arc_shrink_shift, so that when we shrink the ARC,
* we will still not allow it to grow.
*/
int arc_no_grow_shift = 5;
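/*
 * Worked example (shift values are the defaults above; the cache size is
 * illustrative): with arc_c = 4 GiB, growth is allowed only while at
 * least arc_c >> 5 = 128 MiB of memory is free, while one shrink step
 * reclaims arc_c >> 7 = 32 MiB. Since 32 MiB < 128 MiB, a shrink
 * triggered by memory pressure cannot immediately re-enable growth,
 * which is why arc_no_grow_shift must be less than arc_shrink_shift.
 */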
/*
* minimum lifespan of a prefetch block in clock ticks
* (initialized in arc_init())
*/
static int arc_min_prefetch_lifespan;
/*
* If this percent of memory is free, don't throttle.
*/
int arc_lotsfree_percent = 10;
static int arc_dead;
/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;
/*
* These tunables are for performance analysis.
*/
uint64_t zfs_arc_meta_min = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_disable_dup_eviction = 0;
/*
* Note that buffers can be in one of 6 states:
* ARC_anon - anonymous (discussed below)
* ARC_mru - recently used, currently cached
* ARC_mru_ghost - recently used, no longer in cache
* ARC_mfu - frequently used, currently cached
* ARC_mfu_ghost - frequently used, no longer in cache
* ARC_l2c_only - exists in L2ARC but not other states
* When there are no active references to the buffer, they are
* linked onto a list in one of these arc states. These are
* the only buffers that can be evicted or deleted. Within each
* state there are multiple lists, one for meta-data and one for
* non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
* etc.) is tracked separately so that it can be managed more
* explicitly: favored over data, limited explicitly.
*
* Anonymous buffers are buffers that are not associated with
* a DVA. These are buffers that hold dirty block copies
* before they are written to stable storage. By definition,
* they are "ref'd" and are considered part of arc_mru
* that cannot be freed. Generally, they will acquire a DVA
* as they are written and migrate onto the arc_mru list.
*
* The ARC_l2c_only state is for buffers that are in the second
* level ARC but no longer in any of the ARC_m* lists. The second
* level ARC itself may also contain buffers that are in any of
* the ARC_m* states - meaning that a buffer can exist in two
* places. The reason for the ARC_l2c_only state is to keep the
* buffer header in the hash table, so that reads that hit the
* second level ARC benefit from these fast lookups.
*/
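/*
 * A typical buffer lifecycle, as a sketch of the states above:
 *
 *	new write:  ARC_anon --(DVA assigned, written)--> ARC_mru
 *	re-access:  ARC_mru --(second hit)--> ARC_mfu
 *	eviction:   ARC_mru --> ARC_mru_ghost  (data freed, header kept)
 *	            ARC_mfu --> ARC_mfu_ghost  (data freed, header kept)
 *	ghost hit:  ARC_m*_ghost --(data re-read)--> ARC_mfu
 *	L2 only:    header kept in ARC_l2c_only while data lives in L2ARC
 */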
typedef struct arc_state {
/*
* list of evictable buffers
*/
/*
* total amount of evictable data in this state
*/
/*
* total amount of data in this state; this includes: evictable,
* non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
*/
} arc_state_t;
/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;
typedef struct arc_stats {
/*
* Number of buffers that could not be evicted because the hash lock
* was held by another thread. The lock may not necessarily be held
* by something using the same buffer, since hash locks are shared
* by multiple buffers.
*/
/*
* Number of buffers skipped because they have I/O in progress, are
* indirect prefetch buffers that have not lived long enough, or are
* not from the spa we're trying to evict from.
*/
/*
* Number of times arc_evict_state() was unable to evict enough
* buffers to reach its target amount.
*/
/*
* Number of bytes consumed by internal ARC structures necessary
* for tracking purposes; these structures are not actually
* backed by ARC buffers. This includes arc_buf_hdr_t structures
* (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
* caches), and arc_buf_t structures (allocated via arc_buf_t
* cache).
*/
/*
* Number of bytes consumed by ARC buffers of type equal to
* ARC_BUFC_DATA. This is generally consumed by buffers backing
* on disk user data (e.g. plain file contents).
*/
/*
* Number of bytes consumed by ARC buffers of type equal to
* ARC_BUFC_METADATA. This is generally consumed by buffers
* backing on disk data that is used for internal ZFS
* structures (e.g. ZAP, dnode, indirect blocks, etc).
*/
/*
* Number of bytes consumed by various buffers and structures
* not actually backed with ARC buffers. This includes bonus
* buffers (allocated directly via zio_buf_* functions),
* dmu_buf_impl_t structures (allocated via dmu_buf_impl_t
* cache), and dnode_t structures (allocated via dnode_t cache).
*/
/*
* Total number of bytes consumed by ARC buffers residing in the
* arc_anon state. This includes *all* buffers in the arc_anon
* state; e.g. data, metadata, evictable, and unevictable buffers
* are all included in this value.
*/
/*
* Number of bytes consumed by ARC buffers that meet the
* following criteria: backing buffers of type ARC_BUFC_DATA,
* residing in the arc_anon state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
*/
/*
* Number of bytes consumed by ARC buffers that meet the
* following criteria: backing buffers of type ARC_BUFC_METADATA,
* residing in the arc_anon state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
*/
/*
* Total number of bytes consumed by ARC buffers residing in the
* arc_mru state. This includes *all* buffers in the arc_mru
* state; e.g. data, metadata, evictable, and unevictable buffers
* are all included in this value.
*/
/*
* Number of bytes consumed by ARC buffers that meet the
* following criteria: backing buffers of type ARC_BUFC_DATA,
* residing in the arc_mru state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
*/
/*
* Number of bytes consumed by ARC buffers that meet the
* following criteria: backing buffers of type ARC_BUFC_METADATA,
* residing in the arc_mru state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
*/
/*
* Total number of bytes that *would have been* consumed by ARC
* buffers in the arc_mru_ghost state. The key thing to note
* here, is the fact that this size doesn't actually indicate
* RAM consumption. The ghost lists only consist of headers and
* don't actually have ARC buffers linked off of these headers.
* Thus, *if* the headers had associated ARC buffers, these
* buffers *would have* consumed this number of bytes.
*/
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
* ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
*/
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
* ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
*/
/*
* Total number of bytes consumed by ARC buffers residing in the
* arc_mfu state. This includes *all* buffers in the arc_mfu
* state; e.g. data, metadata, evictable, and unevictable buffers
* are all included in this value.
*/
/*
* Number of bytes consumed by ARC buffers that are eligible for
* eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
* state.
*/
/*
* Number of bytes consumed by ARC buffers that are eligible for
* eviction, of type ARC_BUFC_METADATA, and reside in the
* arc_mfu state.
*/
/*
* Total number of bytes that *would have been* consumed by ARC
* buffers in the arc_mfu_ghost state. See the comment above
* arcstat_mru_ghost_size for more details.
*/
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
* ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
*/
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
* ARC_BUFC_METADATA, and linked off the arc_mfu_ghost state.
*/
} arc_stats_t;
static arc_stats_t arc_stats = {
{ "hits", KSTAT_DATA_UINT64 },
{ "misses", KSTAT_DATA_UINT64 },
{ "demand_data_hits", KSTAT_DATA_UINT64 },
{ "demand_data_misses", KSTAT_DATA_UINT64 },
{ "demand_metadata_hits", KSTAT_DATA_UINT64 },
{ "demand_metadata_misses", KSTAT_DATA_UINT64 },
{ "prefetch_data_hits", KSTAT_DATA_UINT64 },
{ "prefetch_data_misses", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
{ "mru_hits", KSTAT_DATA_UINT64 },
{ "mru_ghost_hits", KSTAT_DATA_UINT64 },
{ "mfu_hits", KSTAT_DATA_UINT64 },
{ "mfu_ghost_hits", KSTAT_DATA_UINT64 },
{ "deleted", KSTAT_DATA_UINT64 },
{ "mutex_miss", KSTAT_DATA_UINT64 },
{ "evict_skip", KSTAT_DATA_UINT64 },
{ "evict_not_enough", KSTAT_DATA_UINT64 },
{ "evict_l2_cached", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible", KSTAT_DATA_UINT64 },
{ "evict_l2_ineligible", KSTAT_DATA_UINT64 },
{ "evict_l2_skip", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_elements_max", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
{ "hash_chains", KSTAT_DATA_UINT64 },
{ "hash_chain_max", KSTAT_DATA_UINT64 },
{ "p", KSTAT_DATA_UINT64 },
{ "c", KSTAT_DATA_UINT64 },
{ "c_min", KSTAT_DATA_UINT64 },
{ "c_max", KSTAT_DATA_UINT64 },
{ "size", KSTAT_DATA_UINT64 },
{ "hdr_size", KSTAT_DATA_UINT64 },
{ "data_size", KSTAT_DATA_UINT64 },
{ "metadata_size", KSTAT_DATA_UINT64 },
{ "other_size", KSTAT_DATA_UINT64 },
{ "anon_size", KSTAT_DATA_UINT64 },
{ "anon_evictable_data", KSTAT_DATA_UINT64 },
{ "anon_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_size", KSTAT_DATA_UINT64 },
{ "mru_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_ghost_size", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_size", KSTAT_DATA_UINT64 },
{ "mfu_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_ghost_size", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "l2_hits", KSTAT_DATA_UINT64 },
{ "l2_misses", KSTAT_DATA_UINT64 },
{ "l2_feeds", KSTAT_DATA_UINT64 },
{ "l2_rw_clash", KSTAT_DATA_UINT64 },
{ "l2_read_bytes", KSTAT_DATA_UINT64 },
{ "l2_write_bytes", KSTAT_DATA_UINT64 },
{ "l2_writes_sent", KSTAT_DATA_UINT64 },
{ "l2_writes_done", KSTAT_DATA_UINT64 },
{ "l2_writes_error", KSTAT_DATA_UINT64 },
{ "l2_writes_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_reading", KSTAT_DATA_UINT64 },
{ "l2_evict_l1cached", KSTAT_DATA_UINT64 },
{ "l2_free_on_write", KSTAT_DATA_UINT64 },
{ "l2_cdata_free_on_write", KSTAT_DATA_UINT64 },
{ "l2_abort_lowmem", KSTAT_DATA_UINT64 },
{ "l2_cksum_bad", KSTAT_DATA_UINT64 },
{ "l2_io_error", KSTAT_DATA_UINT64 },
{ "l2_size", KSTAT_DATA_UINT64 },
{ "l2_asize", KSTAT_DATA_UINT64 },
{ "l2_hdr_size", KSTAT_DATA_UINT64 },
{ "l2_compress_successes", KSTAT_DATA_UINT64 },
{ "l2_compress_zeros", KSTAT_DATA_UINT64 },
{ "l2_compress_failures", KSTAT_DATA_UINT64 },
{ "memory_throttle_count", KSTAT_DATA_UINT64 },
{ "duplicate_buffers", KSTAT_DATA_UINT64 },
{ "duplicate_buffers_size", KSTAT_DATA_UINT64 },
{ "duplicate_reads", KSTAT_DATA_UINT64 },
{ "arc_meta_used", KSTAT_DATA_UINT64 },
{ "arc_meta_limit", KSTAT_DATA_UINT64 },
{ "arc_meta_max", KSTAT_DATA_UINT64 },
{ "arc_meta_min", KSTAT_DATA_UINT64 }
};
#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}
#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat); \
		}							\
	}
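/*
 * For example (taken from the ARC hit path), a call such as:
 *
 *	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch,
 *	    !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
 *
 * bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits.
 */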
static arc_state_t *arc_anon;
static arc_state_t *arc_mru;
static arc_state_t *arc_mru_ghost;
static arc_state_t *arc_mfu;
static arc_state_t *arc_mfu_ghost;
static arc_state_t *arc_l2c_only;
/*
* There are several ARC variables that are critical to export as kstats --
* but we don't want to have to grovel around in the kstat whenever we wish to
* manipulate them. For these variables, we therefore define them to be in
* terms of the statistic variable. This assures that we are not introducing
* the possibility of inconsistency by having shadow copies of the variables,
* while still allowing the code to be readable.
*/
#define	L2ARC_IS_VALID_COMPRESS(_c_) \
	((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
static int arc_no_grow; /* Don't try to grow cache size */
static uint64_t arc_tempreserve;
static uint64_t arc_loaned_bytes;
typedef struct arc_callback arc_callback_t;
struct arc_callback {
void *acb_private;
};
typedef struct arc_write_callback arc_write_callback_t;
struct arc_write_callback {
void *awcb_private;
};
/*
* ARC buffers are separated into multiple structs as a memory saving measure:
* - Common fields struct, always defined, and embedded within it:
* - L2-only fields, always allocated but undefined when not in L2ARC
* - L1-only fields, only allocated when in L1ARC
*
* Buffer in L1 Buffer only in L2
* +------------------------+ +------------------------+
* | arc_buf_hdr_t | | arc_buf_hdr_t |
* | | | |
* | | | |
* | | | |
* +------------------------+ +------------------------+
* | l2arc_buf_hdr_t | | l2arc_buf_hdr_t |
* | (undefined if L1-only) | | |
* +------------------------+ +------------------------+
* | l1arc_buf_hdr_t |
* | |
* | |
* | |
* | |
* +------------------------+
*
* Because it's possible for the L2ARC to become extremely large, we can wind
* up eating a lot of memory in L2ARC buffer headers, so the size of a header
* is minimized by only allocating the fields necessary for an L1-cached buffer
* when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr and
* l2arc_buf_hdr) are embedded rather than allocated separately to save a couple
* words in pointers. arc_hdr_realloc() is used to switch a header between
* these two allocation states.
*/
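/*
 * To make the savings concrete (sizes below are illustrative, not the
 * actual struct sizes): if a full header is ~200 bytes and an L2-only
 * header ~80 bytes, then ten million L2ARC-resident blocks consume
 * ~0.8 GB of headers instead of ~2 GB, because the L1-only fields are
 * simply never allocated for them.
 */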
typedef struct l1arc_buf_hdr {
#ifdef ZFS_DEBUG
/*
* used for debugging with kmem_flags - by allocating and freeing
* b_thawed when the buffer is thawed, we get a record of the stack
* trace that thawed it.
*/
void *b_thawed;
#endif
/* for waiting on writes to complete */
/* protected by arc state mutex */
/* updated atomically */
/* self protecting */
/* temporary buffer holder for in-flight compressed data */
void *b_tmp_cdata;
typedef struct l2arc_dev l2arc_dev_t;
typedef struct l2arc_buf_hdr {
/* protected by arc_buf_hdr mutex */
/* real alloc'd buffer size depending on b_compress applied */
struct arc_buf_hdr {
/* protected by hash lock */
/*
* the L1 cache, it needs to be in the set of common fields because it
* must be preserved from the time before a buffer is written out to
* L2ARC until after it is read back in.
*/
/* immutable */
/* L2ARC fields. Undefined when not in L2ARC. */
/* L1ARC fields. Undefined when in l2arc_only state */
};
static arc_buf_t *arc_eviction_list;
static arc_buf_hdr_t arc_eviction_hdr;
#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)
#define	HDR_L2_READING(hdr)	\
	(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) &&	\
	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define	HDR_ISTYPE_METADATA(hdr)	\
	((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
/* For storing compression mode in b_flags */
#define HDR_COMPRESS_OFFSET 24
#define HDR_COMPRESS_NBITS 7
/*
* Other sizes
*/
/*
* Hash table routines
*/
#define HT_LOCK_PAD 64
struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};
#define BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;
static buf_hash_table_t buf_hash_table;
/*
* Level 2 ARC
*/
/*
* If we discover during ARC scan any buffers to be compressed, we boost
* our headroom for the next scanning cycle by this percentage multiple.
*/
#define L2ARC_HEADROOM_BOOST 200
/* L2ARC Performance Tunables */
/*
* L2ARC Internals
*/
struct l2arc_dev {
};
typedef struct l2arc_read_callback {
int l2rcb_flags; /* original flags */
typedef struct l2arc_write_callback {
typedef struct l2arc_data_free {
/* protected by l2arc_free_on_write_mtx */
void *l2df_data;
static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;
static void arc_get_data_buf(arc_buf_t *);
static boolean_t arc_is_overflowing();
static void arc_buf_watch(arc_buf_t *);
static void l2arc_read_done(zio_t *);
static void l2arc_release_cdata_buf(arc_buf_hdr_t *);
static uint64_t
{
int i;
for (i = 0; i < sizeof (dva_t); i++)
return (crc);
}
static void
{
}
static arc_buf_hdr_t *
{
return (hdr);
}
}
return (NULL);
}
/*
* Insert an entry into the hash table. If there is already an element
* equal to elem in the hash table, then the already existing element
* will be returned and the new element will not be inserted.
* Otherwise returns NULL.
* If lockp == NULL, the caller is assumed to already hold the hash lock.
*/
static arc_buf_hdr_t *
{
uint32_t i;
} else {
}
return (fhdr);
}
/* collect some hash table performance data */
if (i > 0) {
if (i == 1)
}
return (NULL);
}
static void
{
}
/* collect some hash table performance data */
}
/*
* Global data structures and functions for the buf kmem cache.
*/
static kmem_cache_t *hdr_full_cache;
static kmem_cache_t *hdr_l2only_cache;
static kmem_cache_t *buf_cache;
static void
buf_fini(void)
{
int i;
for (i = 0; i < BUF_LOCKS; i++)
}
/*
* Constructor callback - called when the cache is empty
* and a new buf is requested.
*/
/* ARGSUSED */
static int
{
return (0);
}
/* ARGSUSED */
static int
{
return (0);
}
/* ARGSUSED */
static int
{
return (0);
}
/*
* Destructor callback - called when a cached buf is
* no longer required.
*/
/* ARGSUSED */
static void
{
}
/* ARGSUSED */
static void
{
}
/* ARGSUSED */
static void
{
}
/*
* Reclaim callback -- invoked when memory is low.
*/
/* ARGSUSED */
static void
{
dprintf("hdr_recl called\n");
/*
* umem calls the reclaim func when we destroy the buf cache,
* which is after we do arc_fini().
*/
if (!arc_dead)
}
static void
buf_init(void)
{
int i, j;
/*
* The hash table is big enough to fill all of physical memory
* with an average block size of zfs_arc_average_blocksize (default 8K).
* By default, the table will take up
* totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
*/
hsize <<= 1;
hsize >>= 1;
goto retry;
}
for (i = 0; i < 256; i++)
for (i = 0; i < BUF_LOCKS; i++) {
}
}
/*
* Transition between the two allocation states for the arc_buf_hdr struct.
* The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
* (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
* version is used when a cache buffer is only in the L2ARC in order to reduce
* memory usage.
*/
static arc_buf_hdr_t *
{
if (new == hdr_full_cache) {
/*
* arc_access and arc_change_state need to be aware that a
* header has just come out of L2ARC, so we set its state to
* l2c_only even though it's about to change.
*/
/* Verify previous threads set to NULL before freeing */
} else {
/*
* If we've reached here, we must have been called from
* arc_evict_hdr(), as such we should have already been
* removed from any ghost list we were previously on
* (which protects us from racing with arc_evict_state),
* thus no locking is needed during this check.
*/
/*
* A buffer must not be moved into the arc_l2c_only
* state if it's not finished being written out to the
* l2arc device. Otherwise, the b_l1hdr.b_tmp_cdata field
* might try to be accessed, even though it was removed.
*/
}
/*
* The header has been reallocated so we need to re-insert it into any
* lists it was on.
*/
/*
* We must place the realloc'ed header back into the list at
* the same spot. Otherwise, if it's placed earlier in the list,
* l2arc_write_buffers() could find it during the function's
* write phase, and try to write it out to the l2arc.
*/
return (nhdr);
}
static void
{
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
return;
}
panic("buffer modified while frozen!");
}
static int
{
int equal;
return (equal);
}
static void
{
return;
return;
}
}
#ifndef _KERNEL
typedef struct procctl {
long cmd;
} procctl_t;
#endif
/* ARGSUSED */
static void
{
#ifndef _KERNEL
if (arc_watch) {
int result;
}
#endif
}
/* ARGSUSED */
static void
{
#ifndef _KERNEL
if (arc_watch) {
int result;
}
#endif
}
static arc_buf_contents_t
{
if (HDR_ISTYPE_METADATA(hdr)) {
return (ARC_BUFC_METADATA);
} else {
return (ARC_BUFC_DATA);
}
}
static uint32_t
{
switch (type) {
case ARC_BUFC_DATA:
/* metadata field is 0 if buffer contains normal data */
return (0);
case ARC_BUFC_METADATA:
return (ARC_FLAG_BUFC_METADATA);
default:
break;
}
panic("undefined ARC buffer type!");
return ((uint32_t)-1);
}
void
{
if (zfs_flags & ZFS_DEBUG_MODIFY) {
panic("modifying non-anon buffer!");
panic("modifying buffer while i/o in progress!");
}
}
#ifdef ZFS_DEBUG
if (zfs_flags & ZFS_DEBUG_MODIFY) {
}
#endif
}
void
{
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
}
static void
{
/* We don't use the L2-only state list. */
if (state != arc_l2c_only) {
if (GHOST_STATE(state)) {
}
}
/* remove the prefetch flag if we get a reference */
}
}
static int
{
int cnt;
/*
* arc_l2c_only counts as a ghost state so we don't need to explicitly
* check to prevent usage of the arc_l2c_only list.
*/
}
return (cnt);
}
/*
* Move the supplied buffer to the indicated state. The hash lock
* for the buffer must be held by the caller.
*/
static void
{
/*
* We almost always have an L1 hdr here, since we call arc_hdr_realloc()
* in arc_read() when bringing a buffer out of the L2ARC. However, the
* L1 hdr doesn't always exist when we change state to arc_anon before
* destroying a header, in which case reallocating to add the L1 hdr is
* pointless.
*/
if (HDR_HAS_L1HDR(hdr)) {
} else {
refcnt = 0;
datacnt = 0;
}
/*
* If this buffer is evictable, transfer it from the
* old state list to the new state list.
*/
if (refcnt == 0) {
/*
* If prefetching out of the ghost cache,
* we will have a non-zero datacnt.
*/
/* ghost elements have a ghost size */
}
}
/*
* An L1 header always exists here, since if we're
* moving to some L1-cached state (i.e. not l2c_only or
* anonymous), we realloc the header to add an L1hdr
* beforehand.
*/
/* ghost elements have a ghost size */
if (GHOST_STATE(new_state)) {
}
}
}
/* adjust state sizes (ignore arc_l2c_only) */
}
if (HDR_HAS_L1HDR(hdr))
/*
* L2-only headers should never be on the L2 state list since they don't
* have L1 headers allocated.
*/
}
void
{
switch (type) {
case ARC_SPACE_DATA:
break;
case ARC_SPACE_META:
break;
case ARC_SPACE_OTHER:
break;
case ARC_SPACE_HDRS:
break;
case ARC_SPACE_L2HDRS:
break;
}
if (type != ARC_SPACE_DATA)
}
void
{
switch (type) {
case ARC_SPACE_DATA:
break;
case ARC_SPACE_META:
break;
case ARC_SPACE_OTHER:
break;
case ARC_SPACE_HDRS:
break;
case ARC_SPACE_L2HDRS:
break;
}
if (type != ARC_SPACE_DATA) {
if (arc_meta_max < arc_meta_used)
}
}
{
return (buf);
}
static char *arc_onloan_tag = "onloan";
/*
* Loan out an anonymous arc buffer. Loaned buffers are not counted as in
* flight data by arc_tempreserve_space() until they are "returned". Loaned
* buffers must be returned to the arc before they can be used by the DMU or
* freed.
*/
{
return (buf);
}
/*
* Return a loaned arc buffer to the arc.
*/
void
{
}
/* Detach an arc_buf from a dbuf (tag) */
void
{
}
static arc_buf_t *
{
/*
* This buffer already exists in the arc so create a duplicate
* copy for the caller. If the buffer is associated with user data
* then track the size and number of duplicates. These stats will be
* updated as duplicate buffers are created and destroyed.
*/
if (HDR_ISTYPE_DATA(hdr)) {
}
return (buf);
}
void
{
/*
* Check to see if this buffer is evicted. Callers
* must verify b_data != NULL to know if the add_ref
* was successful.
*/
return;
}
}
static void
{
}
/*
* Free the arc data buffer. If it is an l2arc write in progress,
* the buffer is placed on l2arc_free_on_write to be freed later.
*/
static void
{
if (HDR_L2_WRITING(hdr)) {
} else {
}
}
static void
{
/*
* The b_tmp_cdata field is linked off of the b_l1hdr, so if
* that doesn't exist, the header is in the arc_l2c_only state,
* and there isn't anything to free (it's already been freed).
*/
if (!HDR_HAS_L1HDR(hdr))
return;
/*
* The header isn't being written to the l2arc device, thus it
* shouldn't have a b_tmp_cdata to free.
*/
if (!HDR_L2_WRITING(hdr)) {
return;
}
/*
* The header does not have compression enabled. This can be due
* to the buffer not being compressible, or because we're
* freeing the buffer before the second phase of
* l2arc_write_buffers() has started (which does the compression
* step). In either case, b_tmp_cdata does not point to a
* separately compressed buffer, so there's nothing to free (it
* points to the same buffer as the arc_buf_t's b_data field).
*/
return;
}
/*
* There's nothing to free since the buffer was all zero's and
* compressed to a zero length buffer.
*/
return;
}
}
/*
* Free up buf->b_data and if 'remove' is set, then pull the
* arc_buf_t off of the arc_buf_hdr_t's list and free it.
*/
static void
{
/* free up data associated with the buf */
if (type == ARC_BUFC_METADATA) {
} else {
}
/* protected by hash lock, if in the hash table */
}
/*
* If we're destroying a duplicate buffer make sure
* that the appropriate statistics are updated.
*/
}
}
/* only remove the buf if requested */
if (!remove)
return;
/* remove the buf from the hdr list */
continue;
/* clean up the buf */
}
static void
{
if (HDR_HAS_L1HDR(hdr)) {
}
if (HDR_HAS_L2HDR(hdr)) {
if (!buflist_held) {
}
/*
* We don't want to leak the b_tmp_cdata buffer that was
* allocated in l2arc_write_buffers()
*/
if (!buflist_held)
}
}
if (HDR_HAS_L1HDR(hdr)) {
} else {
}
}
#ifdef ZFS_DEBUG
}
#endif
}
if (HDR_HAS_L1HDR(hdr)) {
} else {
}
}
void
{
if (hashed) {
} else {
}
} else if (HDR_IO_IN_PROGRESS(hdr)) {
int destroy_hdr;
/*
* We are in the middle of an async write. Don't destroy
* this buffer unless the write completes before we finish
* decrementing the reference count.
*/
if (destroy_hdr)
} else {
else
}
}
{
return (no_callback);
}
if (no_callback)
} else if (no_callback) {
}
return (no_callback);
}
{
}
/*
* Called from the DMU to determine if the current buffer should be
* evicted. In order to ensure proper locking, the eviction must be initiated
* from the DMU. Return true if the buffer is associated with user data and
* duplicate buffers still exist.
*/
{
return (B_FALSE);
/*
* We are in arc_do_user_evicts(); let that function
* perform the eviction.
*/
return (B_FALSE);
/*
* We have already been added to the arc eviction list;
* recommend eviction.
*/
return (B_TRUE);
}
return (evict_needed);
}
/*
* Evict the arc_buf_hdr that is provided as a parameter. The resultant
* state of the header is dependent on its state prior to entering this
* function. The following transitions are possible:
*
* - arc_mru -> arc_mru_ghost
* - arc_mfu -> arc_mfu_ghost
* - arc_mru_ghost -> arc_l2c_only
* - arc_mru_ghost -> deleted
* - arc_mfu_ghost -> arc_l2c_only
* - arc_mfu_ghost -> deleted
*/
static int64_t
{
int64_t bytes_evicted = 0;
if (GHOST_STATE(state)) {
/*
* l2arc_write_buffers() relies on a header's L1 portion
* (i.e. its b_tmp_cdata field) during its write phase.
* Thus, we cannot push a header onto the arc_l2c_only
* state (removing its L1 piece) until the header is
* done being written to the l2arc.
*/
return (bytes_evicted);
}
if (HDR_HAS_L2HDR(hdr)) {
/*
* This buffer is cached on the 2nd Level ARC;
* don't destroy the header.
*/
/*
* dropping from L1+L2 cached to L2-only,
* realloc to remove the L1 header.
*/
} else {
}
return (bytes_evicted);
}
/* prefetch buffers have a minimum lifespan */
if (HDR_IO_IN_PROGRESS(hdr) ||
return (bytes_evicted);
}
break;
}
} else {
}
}
if (HDR_HAS_L2HDR(hdr)) {
} else {
else
}
}
return (bytes_evicted);
}
static uint64_t
{
uint64_t bytes_evicted = 0;
int evict_count = 0;
break;
/*
* To keep our iteration location, move the marker
* forward. Since we're not holding hdr's hash lock, we
* must be very careful and not remove 'hdr' from the
* sublist. Otherwise, other consumers might mistake the
* 'hdr' as not being on a sublist when they call the
* multilist_link_active() function (they all rely on
* the hash lock protecting concurrent insertions and
* removals). multilist_sublist_move_forward() was
* specifically implemented to ensure this is the case
* (only 'marker' will be removed and re-inserted).
*/
/*
* The only case where the b_spa field should ever be
* zero, is the marker headers inserted by
* arc_evict_state(). It's possible for multiple threads
* to be calling arc_evict_state() concurrently (e.g.
* dsl_pool_close() and zio_inject_fault()), so we must
* skip any markers we see from these other threads.
*/
continue;
/* we're only interested in evicting buffers of a certain spa */
continue;
}
/*
* We aren't calling this function from any code path
* that would already be holding a hash lock, so we're
* asserting on this assumption to be defensive in case
* this ever changes. Without this check, it would be
* possible to incorrectly increment arcstat_mutex_miss
* below (e.g. if the code changed such that we called
* this function with a hash lock held).
*/
if (mutex_tryenter(hash_lock)) {
bytes_evicted += evicted;
/*
* If evicted is zero, arc_evict_hdr() must have
* decided to skip this header, don't increment
* evict_count in this case.
*/
if (evicted != 0)
evict_count++;
/*
* If arc_size isn't overflowing, signal any
* threads that might happen to be waiting.
*
* For each header evicted, we wake up a single
* thread. If we used cv_broadcast, we could
* wake up "too many" threads causing arc_size
* to significantly overflow arc_c; since
* arc_get_data_buf() doesn't check for overflow
* when it's woken up (it doesn't because it's
* possible for the ARC to be overflowing while
* full of un-evictable buffers, and the
* function should proceed in this case).
*
* If threads are left sleeping, due to not
* using cv_broadcast, they will be woken up
* just before arc_reclaim_thread() sleeps.
*/
if (!arc_is_overflowing())
} else {
}
}
return (bytes_evicted);
}
/*
* Evict buffers from the given arc state, until we've removed the
* specified number of bytes. Move the removed buffers to the
* appropriate evict state.
*
* This function makes a "best effort". It skips over any buffers
* it can't get a hash_lock on, and so, may not catch all candidates.
* It may also return without evicting as much space as requested.
*
* If bytes is specified using the special value ARC_EVICT_ALL, this
* will evict all available (i.e. unlocked and evictable) buffers from
* the given arc state; which is used by arc_flush().
*/
static uint64_t
{
uint64_t total_evicted = 0;
int num_sublists;
/*
* If we've tried to evict from each sublist, made some
* progress, but still have not hit the target number of bytes
* to evict, we want to keep trying. The markers allow us to
* pick up where we left off for each individual sublist, rather
* than starting from the tail each time.
*/
for (int i = 0; i < num_sublists; i++) {
/*
* A b_spa of 0 is used to indicate that this header is
* a marker. This fact is used in arc_adjust_type() and
* arc_evict_state_impl().
*/
}
/*
* While we haven't hit our target number of bytes to evict, or
* we're evicting all available buffers.
*/
/*
* Start eviction using a randomly selected sublist,
* this is to try and evenly balance eviction across all
* sublists. Always starting at the same sublist
* (e.g. index 0) would cause evictions to favor certain
* sublists over others.
*/
uint64_t scan_evicted = 0;
for (int i = 0; i < num_sublists; i++) {
if (bytes == ARC_EVICT_ALL)
else if (total_evicted < bytes)
else
break;
/* we've reached the end, wrap to the beginning */
if (++sublist_idx >= num_sublists)
sublist_idx = 0;
}
/*
* If we didn't evict anything during this scan, we have
* no reason to believe we'll evict more during another
* scan, so break the loop.
*/
if (scan_evicted == 0) {
/* This isn't possible, let's make that obvious */
/*
* When bytes is ARC_EVICT_ALL, the only way to
* break the loop is when scan_evicted is zero.
* In that case, we actually have evicted enough,
* so we don't want to increment the kstat.
*/
if (bytes != ARC_EVICT_ALL) {
}
break;
}
}
for (int i = 0; i < num_sublists; i++) {
}
return (total_evicted);
}
/*
* Flush all "evictable" data of the given type from the arc state
* specified. This will not evict any "active" buffers (i.e. referenced).
*
* When 'retry' is set to FALSE, the function will make a single pass
* over the state and evict any buffers that it can. Since it doesn't
* continually retry the eviction, it might end up leaving some buffers
* in the ARC due to lock misses.
*
* When 'retry' is set to TRUE, the function will continually retry the
* eviction until *all* evictable buffers have been removed from the
* state. As a result, if concurrent insertions into the state are
* allowed (e.g. if the ARC isn't shutting down), this function might
* wind up in an infinite loop, continually trying to evict buffers.
*/
static uint64_t
{
if (!retry)
break;
}
return (evicted);
}
/*
* Evict the specified number of bytes from the state specified,
* restricting eviction to the spa and type given. This function
* prevents us from trying to evict more from a state's list than
* is "evictable", and to skip evicting altogether when passed a
* negative value for "bytes". In contrast, arc_evict_state() will
* evict everything it can, when passed a negative value for "bytes".
*/
static uint64_t
{
}
return (0);
}
/*
* Evict metadata buffers from the cache, such that arc_meta_used is
* capped by the arc_meta_limit tunable.
*/
static uint64_t
arc_adjust_meta(void)
{
uint64_t total_evicted = 0;
/*
* If we're over the meta limit, we want to evict enough
* metadata to get back under the meta limit. We don't want to
* evict so much that we drop the MRU below arc_p, though. If
* we're over the meta limit more than we're over arc_p, we
* evict some from the MRU here, and some from the MFU below.
*/
/*
* Similar to the above, we want to evict enough bytes to get us
* below the meta limit, but not so much as to drop us below the
* space allotted to the MFU (which is defined as arc_c - arc_p).
*/
return (total_evicted);
}
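/*
 * Worked example of the split above (illustrative numbers): if
 * arc_meta_used exceeds arc_meta_limit by 100 MB while anon + mru
 * exceed arc_p by only 60 MB, the MRU pass targets min(100, 60) =
 * 60 MB of metadata and the MFU pass is left to find the other 40 MB.
 */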
/*
* Return the type of the oldest buffer in the given arc state
*
* This function will select a random sublist of type ARC_BUFC_DATA and
* a random sublist of type ARC_BUFC_METADATA. The tail of each sublist
* is compared, and the type which contains the "older" buffer will be
* returned.
*/
static arc_buf_contents_t
{
/*
* We keep the sublist lock until we're finished, to prevent
* the headers from being destroyed via arc_evict_state().
*/
/*
* These two loops are to ensure we skip any markers that
* might be at the tail of the lists due to arc_evict_state().
*/
break;
}
break;
}
} else {
/* The headers can't be on the sublist without an L1 header */
} else {
}
}
return (type);
}
/*
* Evict buffers from the cache, such that arc_size is capped by arc_c.
*/
static uint64_t
arc_adjust(void)
{
uint64_t total_evicted = 0;
/*
* If we're over arc_meta_limit, we want to correct that before
* potentially evicting data buffers below.
*/
total_evicted += arc_adjust_meta();
/*
* Adjust MRU size
*
* If we're over the target cache size, we want to evict enough
* from the list to get back to our target size. We don't want
* to evict too much from the MRU, such that it drops below
* arc_p. So, if we're over our target cache size more than
* the MRU is over arc_p, we'll evict enough to get back to
* arc_p here, and then evict more from the MFU below.
*/
arc_p));
/*
* If we're below arc_meta_min, always prefer to evict data.
* Otherwise, try to satisfy the requested number of bytes to
* evict from the type which contains older buffers; in an
* effort to keep newer buffers in the cache regardless of their
* type. If we cannot satisfy the number of bytes from this
* type, spill over into the next type.
*/
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* metadata, we try to get the rest from data.
*/
} else {
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* data, we try to get the rest from metadata.
*/
}
/*
* Adjust MFU size
*
* Now that we've tried to evict enough from the MRU to get its
* size back to arc_p, if we're still above the target cache
* size, we evict the rest from the MFU.
*/
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* metadata, we try to get the rest from data.
*/
} else {
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* data, we try to get the rest from metadata.
*/
}
/*
* Adjust ghost lists
*
* In addition to the above, the ARC also defines target values
* for the ghost lists. The sum of the mru list and mru ghost
* list should never exceed the target size of the cache, and
* the sum of the mru list, mfu list, mru ghost list, and mfu
* ghost list should never exceed twice the target size of the
* cache. The following logic enforces these limits on the ghost
* caches, and evicts from them as needed.
*/
total_evicted += bytes;
/*
* We assume the sum of the mru list and mfu list is less than
* or equal to arc_c (we enforced this above), which means we
* can use the simpler of the two equations below:
*
* mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
* mru ghost + mfu ghost <= arc_c
*/
total_evicted += bytes;
return (total_evicted);
}
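/*
 * Worked example of the ghost-list bounds enforced above (illustrative
 * numbers): with arc_c = 4 GiB, the invariants are
 *
 *	mru + mru_ghost <= 4 GiB
 *	mru + mfu + mru_ghost + mfu_ghost <= 8 GiB
 *
 * so if mru holds 3 GiB, mru_ghost may hold headers describing at most
 * 1 GiB of no-longer-resident data.
 */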
static void
arc_do_user_evicts(void)
{
while (arc_eviction_list != NULL) {
}
}
void
{
/*
* If retry is TRUE, a spa must not be specified since we have
* no good way to determine if all of a spa's buffers have been
* evicted from an arc state.
*/
}
void
{
else
}
(void) arc_adjust();
}
typedef enum free_memory_reason_t {
/*
* Additional reserve of pages for pp_reserve.
*/
/*
* Additional reserve of pages for swapfs.
*/
/*
* Return the amount of memory that can be consumed before reclaim will be
* needed. Positive if there is sufficient free memory, negative indicates
* the amount of memory that needs to be freed up.
*/
static int64_t
arc_available_memory(void)
{
int64_t n;
#ifdef _KERNEL
if (needfree > 0) {
if (n < lowest) {
lowest = n;
r = FMR_NEEDFREE;
}
}
/*
* check that we're out of range of the pageout scanner. It starts to
* schedule paging if freemem is less than lotsfree and needfree.
* lotsfree is the high-water mark for pageout, and needfree is the
* number of needed free pages. We add extra pages here to make sure
* the scanner doesn't start up while we're freeing memory.
*/
if (n < lowest) {
lowest = n;
r = FMR_LOTSFREE;
}
/*
* check to make sure that swapfs has enough space so that anon
* reservations can still succeed. anon_resvmem() checks that the
* availrmem is greater than swapfs_minfree, and the number of reserved
* swap pages. We also add a bit of extra here just to prevent
* circumstances from getting really dire.
*/
if (n < lowest) {
lowest = n;
r = FMR_SWAPFS_MINFREE;
}
/*
* Check that we have enough availrmem that memory locking (e.g., via
* mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum
* stores the number of pages that cannot be locked; when availrmem
* drops below pages_pp_maximum, page locking mechanisms such as
* page_pp_lock() will fail.)
*/
if (n < lowest) {
lowest = n;
r = FMR_PAGES_PP_MAXIMUM;
}
#if defined(__i386)
/*
* If we're on an i386 platform, it's possible that we'll exhaust the
* kernel heap space before we ever run out of available physical
* memory. Most checks of the size of the heap_area compare against
* tune.t_minarmem, which is the minimum available real memory that we
* can have in the system. However, this is generally fixed at 25 pages
* which is so low that it's useless. In this comparison, we seek to
* calculate the total heap-size, and reclaim if more than 3/4ths of the
* heap is allocated. (Or, in the calculation, if less than 1/4th is
* free)
*/
if (n < lowest) {
lowest = n;
r = FMR_HEAP_ARENA;
}
#endif
/*
* If zio data pages are being allocated out of a separate heap segment,
* then enforce that the size of available vmem for this arena remains
* above about 1/16th free.
*
* Note: The 1/16th arena free requirement was put in place
* to aggressively evict memory from the arc in order to avoid
* memory fragmentation issues.
*/
if (n < lowest) {
lowest = n;
r = FMR_ZIO_ARENA;
}
}
#else
/* Every 100 calls, free a small amount */
if (spa_get_random(100) == 0)
lowest = -1024;
#endif
last_free_reason = r;
return (lowest);
}
/*
* Determine if the system is under memory pressure and is asking
* to reclaim memory. A return value of TRUE indicates that the system
* is under memory pressure and that the arc should adjust accordingly.
*/
static boolean_t
arc_reclaim_needed(void)
{
return (arc_available_memory() < 0);
}
static void
arc_kmem_reap_now(void)
{
size_t i;
extern kmem_cache_t *zio_buf_cache[];
extern kmem_cache_t *zio_data_buf_cache[];
extern kmem_cache_t *range_seg_cache;
#ifdef _KERNEL
if (arc_meta_used >= arc_meta_limit) {
/*
* We are exceeding our meta-data cache limit.
* Purge some DNLC entries to release holds on meta-data.
*/
}
#if defined(__i386)
/*
* Reclaim unused memory from all kmem caches.
*/
kmem_reap();
#endif
#endif
for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
if (zio_buf_cache[i] != prev_cache) {
prev_cache = zio_buf_cache[i];
}
if (zio_data_buf_cache[i] != prev_data_cache) {
}
}
/*
* Ask the vmem arena to reclaim unused memory from its
* quantum caches.
*/
}
}
/*
* Threads can block in arc_get_data_buf() waiting for this thread to evict
* enough data and signal them to proceed. When this happens, the threads in
* arc_get_data_buf() are sleeping while holding the hash lock for their
* particular arc header. Thus, we must be careful to never sleep on a
* hash lock in this thread. This is to prevent the following deadlock:
*
* - Thread A sleeps on CV in arc_get_data_buf() holding hash lock "L",
* waiting for the reclaim thread to signal it.
*
* - arc_reclaim_thread() tries to acquire hash lock "L" using mutex_enter,
* fails, and goes to sleep forever.
*
* This possible deadlock is avoided by always acquiring a hash lock
* using mutex_tryenter() from arc_reclaim_thread().
*/
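/*
 * Concretely, the interleaving being avoided looks like this
 * (illustrative):
 *
 *	arc_get_data_buf() thread	arc_reclaim_thread()
 *	-------------------------	--------------------
 *	mutex_enter(L)
 *	cv_wait(...waiters_cv...)
 *					mutex_enter(L)	<- blocks forever
 *
 * With mutex_tryenter(), the second column fails immediately, bumps
 * arcstat_mutex_miss, and moves on to evict other headers instead.
 */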
static void
arc_reclaim_thread(void)
{
while (!arc_reclaim_thread_exit) {
if (free_memory < 0) {
/*
* Wait at least zfs_grow_retry (default 60) seconds
* before considering growing.
*/
/*
* If we are still low on memory, shrink the ARC
* so that we have arc_shrink_min free space.
*/
if (to_free > 0) {
#ifdef _KERNEL
#endif
}
} else if (ddi_get_lbolt() >= growtime) {
}
evicted = arc_adjust();
/*
* If evicted is zero, we couldn't evict anything via
* arc_adjust(). This could be due to hash lock
* collisions, but more likely due to the majority of
* arc buffers being unevictable. Therefore, even if
* arc_size is above arc_c, another pass is unlikely to
* be helpful and could potentially cause us to enter an
* infinite loop.
*/
/*
* We're either no longer overflowing, or we
* can't evict anything more, so we should wake
* up any threads before we go to sleep.
*/
/*
* Block until signaled, or after one second (we
* might need to perform arc_kmem_reap_now()
* even if we aren't being signalled)
*/
(void) cv_timedwait(&arc_reclaim_thread_cv,
}
}
thread_exit();
}
static void
arc_user_evicts_thread(void)
{
while (!arc_user_evicts_thread_exit) {
/*
* This is necessary in order for the mdb ::arc dcmd to
* show up to date information. Since the ::arc command
* does not call the kstat's update function, without
* this call, the command may show stale stats for the
* anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
* with this change, the data might be up to 1 second
* out of date; but that should suffice. The arc_state_t
* structures can be queried directly if more accurate
* information is needed.
*/
/*
* Block until signaled, or after one second (we need to
* call the arc's kstat update function regularly).
*/
(void) cv_timedwait(&arc_user_evicts_cv,
}
thread_exit();
}
/*
* Adapt arc info given the number of bytes we are trying to add and
* the state that we are coming from. This function is only called
* when we are adding new content to the cache.
*/
static void
{
int mult;
if (state == arc_l2c_only)
return;
/*
* Adapt the target size of the MRU list:
* - if we just hit in the MRU ghost list, then increase
* the target size of the MRU list.
* - if we just hit in the MFU ghost list, then increase
* the target size of the MFU list by decreasing the
* target size of the MRU list.
*/
if (state == arc_mru_ghost) {
} else if (state == arc_mfu_ghost) {
}
if (arc_reclaim_needed()) {
return;
}
if (arc_no_grow)
return;
return;
/*
* If we're within (2 * maxblocksize) bytes of the target
* cache size, increment the target cache size
*/
}
}
/*
* Check if arc_size has grown past our upper threshold, determined by
* zfs_arc_overflow_shift.
*/
static boolean_t
arc_is_overflowing(void)
{
	/* Always allow at least one block of overflow */
	uint64_t overflow = MAX(SPA_MAXBLOCKSIZE,
	    arc_c >> zfs_arc_overflow_shift);

	return (arc_size >= arc_c + overflow);
}
/*
* The buffer, supplied as the first argument, needs a data block. If we
* are hitting the hard limit for the cache size, we must sleep, waiting
* for the eviction thread to catch up. If we're past the target size
* but below the hard limit, we'll only signal the reclaim thread and
* continue on.
*/
static void
{
/*
* If arc_size is currently overflowing, and has grown past our
* upper limit, we must be adding data faster than the evict
* thread can evict. Thus, to ensure we don't compound the
* problem by adding more data and forcing arc_size to grow even
* further past its target size, we halt and wait for the
* eviction thread to catch up.
*
* It's also possible that the reclaim thread is unable to evict
* enough buffers to get arc_size below the overflow limit (e.g.
* due to buffers being un-evictable, or hash lock collisions).
* In this case, we want to proceed regardless if we're
* overflowing; thus we don't use a while loop here.
*/
if (arc_is_overflowing()) {
/*
* Now that we've acquired the lock, we may no longer be
* over the overflow limit, let's check.
*
* We're ignoring the case of spurious wake ups. If that
* were to happen, it'd let this thread consume an ARC
* buffer before it should have (i.e. before we're under
* the overflow limit and were signalled by the reclaim
* thread). As long as that is a rare occurrence, it
* shouldn't cause any harm.
*/
if (arc_is_overflowing()) {
}
}
if (type == ARC_BUFC_METADATA) {
} else {
}
/*
* Update the state size. Note that ghost states have a
* "ghost size" and so don't need to be updated.
*/
/*
* If this is reached via arc_read, the link is
* protected by the hash lock. If reached via
* arc_buf_alloc, the header should not be accessed by
* any other thread. And, if reached via arc_read_done,
* the hash lock will protect it if it's found in the
* hash table; otherwise no other thread should be
* trying to [add|remove]_reference it.
*/
size);
}
/*
* If we are growing the cache, and we are adding anonymous
* data, and we have outgrown arc_p, update arc_p
*/
}
}
/*
* This routine is called whenever a buffer is accessed.
* NOTE: the hash lock is dropped in this function.
*/
static void
{
/*
* This buffer is not in the cache, and does not
* appear in our "ghost" list. Add the new buffer
* to the MRU state.
*/
now = ddi_get_lbolt();
/*
* If this buffer is here because of a prefetch, then either:
* - clear the flag if this is a "referencing" read
* (any subsequent access will bump this into the MFU state).
* or
* - move the buffer to the head of the list if this is
* another prefetch (to make it less likely to be evicted).
*/
if (HDR_PREFETCH(hdr)) {
/* link protected by hash lock */
} else {
}
return;
}
/*
* This buffer has been "accessed" only once so far,
* but it is still in the cache. Move it to the MFU
* state.
*/
/*
* More than 125ms have passed since we
* instantiated this buffer. Move it to the
* most frequently used state.
*/
}
/*
* This buffer has been "accessed" recently, but
* was evicted from the cache. Move it to the
* MFU state.
*/
if (HDR_PREFETCH(hdr)) {
} else {
}
/*
* This buffer has been accessed more than once and is
* still in the cache. Keep it in the MFU state.
*
* NOTE: an add_reference() that occurred when we did
* the arc_read() will have kicked this off the list.
* If it was a prefetch, we will explicitly move it to
* the head of the list now.
*/
if ((HDR_PREFETCH(hdr)) != 0) {
/* link protected by hash_lock */
}
/*
* This buffer has been accessed more than once but has
* been evicted from the cache. Move it back to the
* MFU state.
*/
if (HDR_PREFETCH(hdr)) {
/*
* This is a prefetch access...
* move this block back to the MRU state.
*/
}
/*
* This buffer is on the 2nd Level ARC.
*/
} else {
ASSERT(!"invalid arc state");
}
}
/* a generic arc_done_func_t which you can use */
/* ARGSUSED */
void
{
}
/* a generic arc_done_func_t */
void
{
} else {
}
}
static void
{
/*
* The hdr was inserted into hash-table and removed from lists
* prior to starting I/O. We should find this header, since
* it's in the hash table, and it should be legit since it's
* not possible to evict it during the I/O. The only possible
* reason for it not to be found is if we were freed during the
* read.
*/
if (HDR_IN_HASH_TABLE(hdr)) {
&hash_lock);
}
/* byteswap if necessary */
}
/*
* Only call arc_access on anonymous buffers. This is because
* if we've issued an I/O for an evicted buffer, we've already
* called arc_access (to prevent any simultaneous readers from
* getting confused).
*/
}
/* create copies of the data buffer for the callers */
}
}
}
}
callback_list != NULL);
if (HDR_IN_HASH_TABLE(hdr))
}
/*
* Broadcast before we drop the hash_lock to avoid the possibility
* that the hdr (and hence the cv) might be freed before we get to
* the cv_broadcast().
*/
} else {
/*
* This block was freed while we waited for the read to
* complete. It has been removed from the hash table and
* moved to the anonymous state (so that it won't show up
* in the cache).
*/
}
/* execute each callback and free its structure */
}
}
if (freeable)
}
/*
* "Read" the block at the specified DVA (in bp) via the
* cache. If the block is found in the cache, invoke the provided
* callback immediately and return. Note that the `zio' parameter
* in the callback will be NULL in this case, since no IO was
* required. If the block is not in the cache pass the read request
* on to the spa with a substitute callback function, so that the
* requested block will be added to the cache.
*
* If a read request arrives for a block that has a read in-progress,
* either wait for the in-progress read to complete (and return the
* results); or, if this is a read with a "done" func, add a record
* to the read to invoke the "done" func when the read completes,
* and return; or just return.
*
* arc_read_done() will invoke all the requested "done" functions
* for readers of this block.
*/
int
arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
    void *private, zio_priority_t priority, int zio_flags,
    arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
{
top:
if (!BP_IS_EMBEDDED(bp)) {
/*
* Embedded BP's have no DVA and require no I/O to "read".
* Create an anonymous arc buf to back it.
*/
}
*arc_flags |= ARC_FLAG_CACHED;
if (HDR_IO_IN_PROGRESS(hdr)) {
if (*arc_flags & ARC_FLAG_WAIT) {
goto top;
}
if (done) {
KM_SLEEP);
return (0);
}
return (0);
}
if (done) {
/*
* If this block is already in use, create a new
* copy of the data so that we will be guaranteed
* that arc_release() will always succeed.
*/
if (HDR_BUF_AVAILABLE(hdr)) {
} else {
}
} else if (*arc_flags & ARC_FLAG_PREFETCH &&
}
if (*arc_flags & ARC_FLAG_L2CACHE)
if (*arc_flags & ARC_FLAG_L2COMPRESS)
if (done)
} else {
/* this block is not in the cache */
if (!BP_IS_EMBEDDED(bp)) {
}
/* somebody beat us to the hash insert */
goto top; /* restart the IO request */
}
/* if this is a prefetch, we don't have a reference */
if (*arc_flags & ARC_FLAG_PREFETCH) {
private);
}
if (*arc_flags & ARC_FLAG_L2CACHE)
if (*arc_flags & ARC_FLAG_L2COMPRESS)
if (BP_GET_LEVEL(bp) > 0)
} else {
/*
* This block is in the ghost cache. If it was L2-only
* (and thus didn't have an L1 hdr), we realloc the
* header to add an L1 hdr.
*/
if (!HDR_HAS_L1HDR(hdr)) {
}
/* if this is a prefetch, we don't have a reference */
if (*arc_flags & ARC_FLAG_PREFETCH)
else
if (*arc_flags & ARC_FLAG_L2CACHE)
if (*arc_flags & ARC_FLAG_L2COMPRESS)
}
if (HDR_HAS_L2HDR(hdr) &&
/*
* Lock out device removal.
*/
if (vdev_is_dead(vd) ||
}
/*
* At this point, we have a level 1 cache miss. Try again in
* L2ARC if possible.
*/
/*
* Read from the L2ARC if the following are true:
* 1. The L2ARC vdev was previously cached.
* 2. This buffer still has L2ARC metadata.
* 3. This buffer isn't currently writing to the L2ARC.
* 4. The L2ARC entry wasn't evicted, which may
* also have invalidated the vdev.
	 * 5. This isn't a prefetch with l2arc_noprefetch set.
*/
if (HDR_HAS_L2HDR(hdr) &&
KM_SLEEP);
/*
* l2arc read. The SCL_L2ARC lock will be
* released by l2arc_read_done().
* Issue a null zio if the underlying buffer
* was squashed to zero size by compression.
*/
if (b_compress == ZIO_COMPRESS_EMPTY) {
} else {
}
if (*arc_flags & ARC_FLAG_NOWAIT) {
return (0);
}
return (0);
/* l2arc read error; goto zio_read() */
} else {
arc_buf_hdr_t *, hdr);
if (HDR_L2_WRITING(hdr))
}
} else {
if (l2arc_ndev != 0) {
arc_buf_hdr_t *, hdr);
}
}
if (*arc_flags & ARC_FLAG_WAIT)
}
return (0);
}
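/*
 * A minimal caller-side sketch, not from the original source, of how
 * the interface above is typically driven. example_read_done() and
 * example_cached_read() are hypothetical; the arc_read() signature and
 * the ARC_FLAG_NOWAIT/ARC_FLAG_CACHED flags are the ones used in this
 * file.
 */
static void
example_read_done(zio_t *zio, arc_buf_t *buf, void *private)
{
	/* zio is NULL when the block was satisfied from the cache */
	if (zio == NULL || zio->io_error == 0)
		*(arc_buf_t **)private = buf;
}

static int
example_cached_read(spa_t *spa, const blkptr_t *bp,
    const zbookmark_phys_t *zb, arc_buf_t **bufp)
{
	arc_flags_t aflags = ARC_FLAG_NOWAIT;
	int error;

	*bufp = NULL;
	error = arc_read(NULL, spa, bp, example_read_done, bufp,
	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);

	/* on a cache hit the done func has already run synchronously */
	if (error == 0 && (aflags & ARC_FLAG_CACHED))
		ASSERT(*bufp != NULL);
	return (error);
}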
void
{
}
/*
* Notify the arc that a block was freed, and thus will never be used again.
*/
void
{
return;
if (HDR_BUF_AVAILABLE(hdr)) {
} else {
}
}
/*
* Clear the user eviction callback set by arc_set_callback(), first calling
 * it if it exists. Because the presence of a callback keeps an arc_buf cached,
* clearing the callback may result in the arc_buf being destroyed. However,
* it will not result in the *last* arc_buf being destroyed, hence the data
* will remain cached in the ARC. We make a copy of the arc buffer here so
* that we can process the callback without holding any locks.
*
* It's possible that the callback is already in the process of being cleared
 * by another thread. In this case we cannot clear the callback.
*
* Returns B_TRUE if the callback was successfully called and cleared.
*/
{
/*
* We are in arc_do_user_evicts().
*/
return (B_FALSE);
/*
* We are on the eviction list; process this buffer now
* but let arc_do_user_evicts() do the reaping.
*/
return (B_TRUE);
}
} else {
}
return (B_TRUE);
}
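/*
 * A minimal sketch, not from the original source, of the three
 * outcomes described above, assuming the b_efunc/b_private callback
 * fields and the b_evict_lock used by the fragments in this function.
 * The buffer copy made by the full version is elided.
 */
static boolean_t
arc_clear_callback_sketch(arc_buf_t *buf)
{
	arc_evict_func_t *efunc;
	void *private;

	mutex_enter(&buf->b_evict_lock);
	efunc = buf->b_efunc;
	private = buf->b_private;

	if (buf->b_hdr == NULL) {
		/* already being reaped by arc_do_user_evicts() */
		mutex_exit(&buf->b_evict_lock);
		return (B_FALSE);
	}
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	mutex_exit(&buf->b_evict_lock);

	/* run the callback without holding any locks */
	VERIFY0(efunc(private));
	return (B_TRUE);
}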
/*
* Release this buffer from the cache, making it an anonymous buffer. This
* must be done after a read and prior to modifying the buffer contents.
* If the buffer has more than one reference, we must make
* a new hdr for the buffer.
*/
void
{
/*
* It would be nice to assert that if it's DMU metadata (level >
* 0 || it's the dnode file), then it must be syncing context.
* But we don't know that information at this level.
*/
/*
* We don't grab the hash lock prior to this check, because if
* the buffer's header is in the arc_anon state, it won't be
* linked into the hash table.
*/
return;
}
/*
* This assignment is only valid as long as the hash_lock is
	 * held, so we must be careful not to reference state or the
* b_state field after dropping the lock.
*/
/* this buffer is not on any list */
if (HDR_HAS_L2HDR(hdr)) {
/*
* We don't want to leak the b_tmp_cdata buffer that was
* allocated in l2arc_write_buffers()
*/
}
/*
* Do we have more than one buf?
*/
/*
* Pull the data off of this hdr and attach it to
* a new anonymous hdr.
*/
}
/*
* We're releasing a duplicate user data buffer, update
* our statistics accordingly.
*/
if (HDR_ISTYPE_DATA(hdr)) {
}
} else {
/* protected by hash lock, or hdr is on arc_anon */
}
}
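/*
 * A minimal sketch, not from the original source, of the duplicate-
 * buffer accounting described above, assuming b_datacnt counts the
 * bufs sharing this hdr and that the ARCSTAT_* macros and the
 * duplicate-buffer kstats behave as they do elsewhere in this file.
 */
static void
arc_release_dup_stats_sketch(arc_buf_hdr_t *hdr)
{
	/* only user data buffers are tracked as duplicates */
	if (hdr->b_l1hdr.b_datacnt > 1 && HDR_ISTYPE_DATA(hdr)) {
		ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
		ARCSTAT_INCR(arcstat_duplicate_buffers_size,
		    -hdr->b_size);
	}
}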
int
{
int released;
return (released);
}
#ifdef ZFS_DEBUG
int
{
int referenced;
return (referenced);
}
#endif
static void
{
/*
* If the IO is already in progress, then this is a re-write
* attempt, so we need to thaw and re-compute the cksum.
* It is the responsibility of the callback to handle the
* accounting for any re-write attempt.
*/
if (HDR_IO_IN_PROGRESS(hdr)) {
}
}
}
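/*
 * A minimal sketch, not from the original source, of the re-write
 * handling described above, assuming the b_freeze_cksum field and the
 * b_l1hdr.b_freeze_lock mutex used by the checksum code in this file.
 */
static void
arc_rewrite_thaw_sketch(arc_buf_hdr_t *hdr)
{
	if (!HDR_IO_IN_PROGRESS(hdr))
		return;

	/* re-write attempt: the frozen checksum is now stale */
	mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}
	mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
}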
/*
* The SPA calls this callback for each physical write that happens on behalf
* of a logical write. See the comment in dbuf_write_physdone() for details.
*/
static void
{
}
static void
{
} else {
}
} else {
}
/*
	 * If the block to be written was all-zero or compressed enough to be
	 * embedded in the BP, no write was performed so there will be no
	 * dva/birth/checksum.  The buffer must therefore remain anonymous
	 * (and uncached).
*/
/*
* This can only happen if we overwrite for
* sync-to-convergence, because we remove
* buffers from the hash table when we arc_free().
*/
panic("bad overwrite, hdr=%p exists=%p",
/* nopwrite */
panic("bad nopwrite, hdr=%p exists=%p",
} else {
/* Dedup */
}
}
/* if it's not anon, we are doing a scrub */
} else {
}
}
zio_t *
{
if (l2arc)
if (l2arc_compress)
return (zio);
}
static int
{
#ifdef _KERNEL
#if defined(__i386)
#endif
return (0);
page_load = 0;
}
/*
* If we are in pageout, we know that memory is already tight,
* the arc is already going to be evicting, so we just want to
* continue to let page writes occur as quickly as possible.
*/
if (curproc == proc_pageout) {
/* Note: reserve is inflated, so we deflate */
return (0);
} else if (page_load > 0 && arc_reclaim_needed()) {
/* memory is low, delay before restarting */
}
page_load = 0;
#endif
return (0);
}
void
{
}
int
{
int error;
/*
* Don't count loaned bufs as in flight dirty data to prevent long
* network delays from blocking transactions that are ready to be
* assigned to a txg.
*/
/*
	 * Writes will, almost always, require additional memory allocations
	 * in order to compress/encrypt/etc the data.  We therefore need to
	 * make sure that there is sufficient available memory for this.
*/
if (error != 0)
return (error);
/*
* Throttle writes when the amount of dirty data in the cache
* gets too large. We try to keep the cache less than half full
* of dirty blocks so that our sync times don't grow too large.
* Note: if two requests come in concurrently, we might let them
* both succeed, when one of them should fail. Not a huge deal.
*/
dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
"anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
arc_tempreserve>>10,
}
return (0);
}
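/*
 * A minimal sketch, not from the original source, of the dirty-data
 * throttle described above, assuming anon_size is the amount of
 * anonymous (dirty) data net of loaned bufs, and that arc_tempreserve
 * and arc_c are the globals used elsewhere in this file.
 */
static int
arc_throttle_check_sketch(uint64_t reserve, uint64_t anon_size)
{
	/*
	 * Fail with ERESTART (so the caller backs off and retries)
	 * once dirty data would exceed half the cache and the
	 * anonymous state alone exceeds a quarter of it.
	 */
	if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
	    anon_size > arc_c / 4)
		return (SET_ERROR(ERESTART));

	atomic_add_64(&arc_tempreserve, reserve);
	return (0);
}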
static void
{
}
static int
{
if (rw == KSTAT_WRITE) {
return (EACCES);
} else {
}
return (0);
}
/*
* This function *must* return indices evenly distributed between all
* sublists of the multilist. This is needed due to how the ARC eviction
* code is laid out; arc_evict_state() assumes ARC buffers are evenly
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
unsigned int
{
/*
* We rely on b_dva to generate evenly distributed index
* numbers using buf_hash below. So, as an added precaution,
* let's make sure we never add empty buffers to the arc lists.
*/
/*
	 * The assumption here is that the hash value for a given
	 * arc_buf_hdr_t will remain constant throughout its lifetime
	 * (i.e. its b_spa, b_dva, and b_birth fields don't change).
* Thus, we don't need to store the header's sublist index
* on insertion, as this index can be recalculated on removal.
*
* Also, the low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
* would not be evenly distributed.
*/
}
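/*
 * A minimal sketch, not from the original source, of the index
 * computation described above, assuming the buf_hash() helper defined
 * earlier in this file and the multilist_get_num_sublists() accessor.
 */
static unsigned int
arc_multilist_index_sketch(multilist_t *ml, arc_buf_hdr_t *hdr)
{
	/*
	 * (spa, dva, birth) is stable for the life of the header, so
	 * insertion and removal always land on the same sublist.
	 */
	return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
	    multilist_get_num_sublists(ml));
}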
void
arc_init(void)
{
/*
* allmem is "all memory that we could possibly use".
*/
#ifdef _KERNEL
#else
#endif
/* Convert seconds to clock ticks */
/* Start out with 1/8 of all memory */
#ifdef _KERNEL
/*
* On architectures where the physical memory can be larger
* than the addressable space (intel in 32-bit mode), we may
* need to limit the cache to 1/8 of VM size.
*/
#endif
/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
else
/*
* Allow the tunables to override our calculations if they are
	 * reasonable (i.e. over 64MB)
*/
/* limit meta-data to 1/4 of the arc capacity */
/* Allow the tunable to override if it is reasonable */
if (zfs_arc_meta_min > 0) {
} else {
}
if (zfs_arc_grow_retry > 0)
if (zfs_arc_shrink_shift > 0)
/*
* Ensure that arc_no_grow_shift is less than arc_shrink_shift.
*/
if (arc_no_grow_shift >= arc_shrink_shift)
if (zfs_arc_p_min_shift > 0)
if (zfs_arc_num_sublists_per_state < 1)
/* if kmem_flags are set, lets try to use less memory */
if (kmem_debugging())
arc_size = 0;
sizeof (arc_buf_hdr_t),
sizeof (arc_buf_hdr_t),
sizeof (arc_buf_hdr_t),
sizeof (arc_buf_hdr_t),
sizeof (arc_buf_hdr_t),
sizeof (arc_buf_hdr_t),
sizeof (arc_buf_hdr_t),
sizeof (arc_buf_hdr_t),
sizeof (arc_buf_hdr_t),
sizeof (arc_buf_hdr_t),
buf_init();
}
/*
* Calculate maximum amount of dirty data per pool.
*
	 * If it has been set by /etc/system, take that.
	 * Otherwise, use a percentage of physical memory defined by
* zfs_dirty_data_max_percent (default 10%) with a cap at
* zfs_dirty_data_max_max (default 4GB).
*/
if (zfs_dirty_data_max == 0) {
		zfs_dirty_data_max = physmem * PAGESIZE *
		    zfs_dirty_data_max_percent / 100;
		zfs_dirty_data_max = MIN(zfs_dirty_data_max,
		    zfs_dirty_data_max_max);
}
}
void
arc_fini(void)
{
/*
* The reclaim thread will set arc_reclaim_thread_exit back to
* FALSE when it is finished exiting; we're waiting for that.
*/
while (arc_reclaim_thread_exit) {
}
/*
* The user evicts thread will set arc_user_evicts_thread_exit
* to FALSE when it is finished exiting; we're waiting for that.
*/
while (arc_user_evicts_thread_exit) {
}
/* Use TRUE to ensure *all* buffers are evicted */
}
buf_fini();
}
/*
* Level 2 ARC
*
* The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
* It uses dedicated storage devices to hold cached data, which are populated
* using large infrequent writes. The main role of this cache is to boost
* the performance of random read workloads. The intended L2ARC devices
* include short-stroked disks, solid state disks, and other media with
* substantially faster read latency than disk.
*
* +-----------------------+
* | ARC |
* +-----------------------+
* | ^ ^
* | | |
* l2arc_feed_thread() arc_read()
* | | |
* | l2arc read |
* V | |
* +---------------+ |
* | L2ARC | |
* +---------------+ |
* | ^ |
* l2arc_write() | |
* | | |
* V | |
* +-------+ +-------+
* | vdev | | vdev |
* | cache | | cache |
* +-------+ +-------+
* +=========+ .-----.
* : L2ARC : |-_____-|
* : devices : | Disks |
* +=========+ `-_____-'
*
* Read requests are satisfied from the following sources, in order:
*
* 1) ARC
* 2) vdev cache of L2ARC devices
* 3) L2ARC devices
* 4) vdev cache of disks
* 5) disks
*
* Some L2ARC device types exhibit extremely slow write performance.
 * To accommodate this there are some significant differences between
* the L2ARC and traditional cache design:
*
* 1. There is no eviction path from the ARC to the L2ARC. Evictions from
* the ARC behave as usual, freeing buffers and placing headers on ghost
* lists. The ARC does not send buffers to the L2ARC during eviction as
* this would add inflated write latencies for all ARC memory pressure.
*
* 2. The L2ARC attempts to cache data from the ARC before it is evicted.
* It does this by periodically scanning buffers from the eviction-end of
* the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
* not already there. It scans until a headroom of buffers is satisfied,
* which itself is a buffer for ARC eviction. If a compressible buffer is
* found during scanning and selected for writing to an L2ARC device, we
* temporarily boost scanning headroom during the next scan cycle to make
* sure we adapt to compression effects (which might significantly reduce
* the data volume we write to L2ARC). The thread that does this is
* l2arc_feed_thread(), illustrated below; example sizes are included to
* provide a better sense of ratio than this diagram:
*
* head --> tail
* +---------------------+----------+
* ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
* +---------------------+----------+ | o L2ARC eligible
* ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
* +---------------------+----------+ |
* 15.9 Gbytes ^ 32 Mbytes |
* headroom |
* l2arc_feed_thread()
* |
* l2arc write hand <--[oooo]--'
* | 8 Mbyte
* | write max
* V
* +==============================+
* L2ARC dev |####|#|###|###| |####| ... |
* +==============================+
* 32 Gbytes
*
* 3. If an ARC buffer is copied to the L2ARC but then hit instead of
* evicted, then the L2ARC has cached a buffer much sooner than it probably
* needed to, potentially wasting L2ARC device bandwidth and storage. It is
* safe to say that this is an uncommon case, since buffers at the end of
* the ARC lists have moved there due to inactivity.
*
* 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
* then the L2ARC simply misses copying some buffers. This serves as a
* pressure valve to prevent heavy read workloads from both stalling the ARC
* with waits and clogging the L2ARC with writes. This also helps prevent
* the potential for the L2ARC to churn if it attempts to cache content too
* quickly, such as during backups of the entire pool.
*
* 5. After system boot and before the ARC has filled main memory, there are
* no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
 * lists can remain mostly static. Instead of searching from the tail of these
* lists as pictured, the l2arc_feed_thread() will search from the list heads
* for eligible buffers, greatly increasing its chance of finding them.
*
* The L2ARC device write speed is also boosted during this time so that
* the L2ARC warms up faster. Since there have been no ARC evictions yet,
* there are no L2ARC reads, and no fear of degrading read performance
* through increased writes.
*
* 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
* the vdev queue can aggregate them into larger and fewer writes. Each
* device is written to in a rotor fashion, sweeping writes through
* available space then repeating.
*
* 7. The L2ARC does not store dirty content. It never needs to flush
* write buffers back to disk based storage.
*
* 8. If an ARC buffer is written (and dirtied) which also exists in the
* L2ARC, the now stale L2ARC buffer is immediately dropped.
*
* The performance of the L2ARC can be tweaked by a number of tunables, which
* may be necessary for different workloads:
*
* l2arc_write_max max write bytes per interval
* l2arc_write_boost extra write bytes during device warmup
* l2arc_noprefetch skip caching prefetched buffers
* l2arc_headroom number of max device writes to precache
* l2arc_headroom_boost when we find compressed buffers during ARC
* scanning, we multiply headroom by this
* percentage factor for the next scan cycle,
* since more compressed buffers are likely to
* be present
* l2arc_feed_secs seconds between L2ARC writing
*
* Tunables may be removed or added as future performance improvements are
* integrated, and also may become zpool properties.
*
* There are three key functions that control how the L2ARC warms up:
*
* l2arc_write_eligible() check if a buffer is eligible to cache
* l2arc_write_size() calculate how much to write
* l2arc_write_interval() calculate sleep delay between writes
*
* These three functions determine what to write, how much, and how quickly
* to send writes.
*/
static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
	/*
	 * A buffer is *not* eligible for the L2ARC if it:
	 * 1. belongs to a different spa.
	 * 2. is already cached on the L2ARC.
	 * 3. has an I/O in progress (it may be an incomplete read).
	 * 4. is flagged not eligible (zfs property).
	 */
	if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
	    HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
		return (B_FALSE);

	return (B_TRUE);
}
static uint64_t
l2arc_write_size(void)
{
	uint64_t size;

	/*
	 * Make sure our globals have meaningful values in case the user
	 * altered them.
	 */
	size = l2arc_write_max;
	if (size == 0) {
		cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
		    "be greater than zero, resetting it to the default (%d)",
		    L2ARC_WRITE_SIZE);
		size = l2arc_write_max = L2ARC_WRITE_SIZE;
	}

	if (arc_warm == B_FALSE)
		size += l2arc_write_boost;

	return (size);
}
static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
	clock_t interval, next, now;

	/*
	 * If the ARC lists are busy, increase our write rate; if the
	 * lists are stale, idle back. This is achieved by checking
	 * how much we previously wrote - if it was more than half of
	 * what we wanted, schedule the next write much sooner.
	 */
	if (l2arc_feed_again && wrote > (wanted / 2))
		interval = (hz * l2arc_feed_min_ms) / 1000;
	else
		interval = hz * l2arc_feed_secs;

	now = ddi_get_lbolt();
	next = MAX(now, MIN(now + interval, began + interval));

	return (next);
}
/*
* Cycle through L2ARC devices. This is how L2ARC load balances.
* If a device is returned, this also returns holding the spa config lock.
*/
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
/*
* Lock out the removal of spas (spa_namespace_lock), then removal
* of cache devices (l2arc_dev_mtx). Once a device has been selected,
* both locks will be dropped and a spa config lock held instead.
*/
/* if there are no vdevs, there is nothing to do */
if (l2arc_ndev == 0)
goto out;
do {
/* loop around the list looking for a non-faulted vdev */
} else {
}
/* if we have come back to the start, bail out */
break;
/* if we were unable to find any usable vdevs, return NULL */
out:
/*
* Grab the config lock to prevent the 'next' device from being
* removed while we are writing to it.
*/
return (next);
}
/*
* Free buffers that were tagged for destruction.
*/
static void
{
}
}
/*
* A write to a cache device has completed. Update all headers to allow
* reads from these buffers to begin.
*/
static void
{
int64_t bytes_dropped = 0;
l2arc_write_callback_t *, cb);
/*
* All writes completed, or an error was hit.
*/
top:
/*
* We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order in
		 * which the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. We must retry so we
* don't leave the ARC_FLAG_L2_WRITING bit set.
*/
/*
* We don't want to rescan the headers we've
* already marked as having been written out, so
* we reinsert the head node so we can pick up
* where we left off.
*/
/*
* We wait for the hash lock to become available
* to try and prevent busy waiting, and increase
* the chance we'll be able to acquire the lock
* the next time around.
*/
goto top;
}
/*
* We could not have been moved into the arc_l2c_only
* state while in-flight due to our ARC_FLAG_L2_WRITING
* bit being set. Let's just ensure that's being enforced.
*/
/*
		 * We may have allocated a buffer for L2ARC compression;
* we must release it to avoid leaking this data.
*/
/*
* Error - drop L2ARC entry.
*/
}
/*
* Allow ARC to begin reads and ghost list evictions to
* this L2ARC entry.
*/
}
}
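/*
 * A minimal sketch, not from the original source, of the lock-order
 * dance described above: l2ad_mtx is held while walking the buflist,
 * so the hash lock may only be tried, never blocked on. The marker
 * re-insertion done by the full version is elided.
 */
static void
l2arc_trylock_sketch(l2arc_dev_t *dev, arc_buf_hdr_t *hdr)
{
	kmutex_t *hash_lock = HDR_LOCK(hdr);

	while (!mutex_tryenter(hash_lock)) {
		/*
		 * Drop l2ad_mtx so the hash lock holder can make
		 * progress, wait for the lock to become free, then
		 * reacquire l2ad_mtx and rescan.
		 */
		mutex_exit(&dev->l2ad_mtx);
		mutex_enter(hash_lock);
		mutex_exit(hash_lock);
		mutex_enter(&dev->l2ad_mtx);
	}
	/* hash_lock is held; the header can now be processed safely */
}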
/*
* A read to a cache device completed. Validate buffer contents before
* handing over to the regular ARC routines.
*/
static void
{
int equal;
/*
* If the buffer was compressed, decompress it first.
*/
/*
* Check this survived the L2ARC journey.
*/
} else {
/*
* Buffer didn't survive caching. Increment stats and
* reissue to the original storage device.
*/
} else {
}
if (!equal)
/*
* If there's no waiter, issue an async i/o to the primary
* storage now. If there *is* a waiter, the caller must
* issue the i/o in a context where it's OK to block.
*/
}
}
}
/*
* This is the list priority from which the L2ARC will search for pages to
* cache. This is used within loops (0..3) to cycle through lists in the
* desired order. This order can have a significant effect on cache
* performance.
*
* Currently the metadata lists are hit first, MFU then MRU, followed by
* the data lists. This function returns a locked list, and also returns
* the lock pointer.
*/
static multilist_sublist_t *
{
unsigned int idx;
switch (list_num) {
case 0:
break;
case 1:
break;
case 2:
break;
case 3:
break;
}
/*
* Return a randomly-selected sublist. This is acceptable
* because the caller feeds only a little bit of data for each
* call (8MB). Subsequent calls will result in different
* sublists being selected.
*/
}
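/*
 * A minimal sketch, not from the original source, of the random
 * sublist selection described above, assuming the generic multilist
 * accessors available in this tree.
 */
static multilist_sublist_t *
l2arc_random_sublist_sketch(multilist_t *ml)
{
	unsigned int idx = multilist_get_random_index(ml);

	/* return with the chosen sublist locked, as callers expect */
	return (multilist_sublist_lock(ml, idx));
}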
/*
* Evict buffers from the device write hand to the distance specified in
 * bytes. This distance may span populated buffers, or it may span nothing.
* This is clearing a region on the L2ARC device ready for writing.
* If the 'all' boolean is set, every buffer is evicted.
*/
static void
{
int64_t bytes_evicted = 0;
/*
* This is the first sweep through the device. There is
* nothing to evict.
*/
return;
}
/*
* When nearing the end of the device, evict to the end
* before the device write hand jumps to the start.
*/
} else {
}
top:
/*
* We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order in
		 * which the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. Retry.
*/
goto top;
}
if (HDR_L2_WRITE_HEAD(hdr)) {
/*
* We hit a write head node. Leave it for
* l2arc_write_done().
*/
continue;
}
/*
* We've evicted to the target address,
* or the end of the device.
*/
break;
}
if (!HDR_HAS_L1HDR(hdr)) {
/*
* This doesn't exist in the ARC. Destroy.
* arc_hdr_destroy() will call list_remove()
* and decrement arcstat_l2_size.
*/
} else {
/*
* Invalidate issued or about to be issued
* reads, since we may be about to write
* over this location.
*/
if (HDR_L2_READING(hdr)) {
}
/* Tell ARC this no longer exists in L2ARC. */
/* Ensure this header has finished being written */
}
}
}
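/*
 * A minimal sketch, not from the original source, of how the eviction
 * target address is derived from the write hand and the requested
 * distance, assuming the l2ad_hand/l2ad_end device fields used above.
 */
static uint64_t
l2arc_evict_target_sketch(l2arc_dev_t *dev, uint64_t distance)
{
	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
		/* nearing the end: clear through to the device end */
		return (dev->l2ad_end);
	}
	return (dev->l2ad_hand + distance);
}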
/*
* Find and write ARC buffers to the L2ARC device.
*
* An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
* for reading until they have completed writing.
* The headroom_boost is an in-out parameter used to maintain headroom boost
* state between calls to this function.
*
* Returns the number of bytes actually written (which may be smaller than
* the delta by which the device hand has changed due to alignment).
*/
static uint64_t
{
void *buf_data;
/* Lower the flag now, we might want to raise it again later. */
/*
* We will want to try to compress buffers that are at least 2x the
* device sector size.
*/
/*
* Copy buffers for L2ARC writing.
*/
/*
* L2ARC fast warmup.
*
* Until the ARC is warm and starts to evict, read from the
* head of the ARC lists rather than the tail.
*/
else
if (do_headroom_boost)
else
if (!mutex_tryenter(hash_lock)) {
/*
* Skip this buffer rather than waiting.
*/
continue;
}
/*
* Searched too far.
*/
break;
}
continue;
}
break;
}
/*
* Insert a dummy header on the buflist so
* l2arc_write_done() can find where the
* write buffers begin without searching.
*/
cb = kmem_alloc(
sizeof (l2arc_write_callback_t), KM_SLEEP);
}
/*
* Create and add a new L2ARC header.
*/
/*
* Temporarily stash the data buffer in b_tmp_cdata.
* The subsequent write step will pick it up from
			 * there. This is because we can't access
			 * b_l1hdr.b_buf
* without holding the hash_lock, which we in turn
* can't access without holding the ARC list locks
* (which we want to avoid during compression/writing).
*/
/*
* Compute and store the buffer cksum before
* writing. On debug the cksum is verified first.
*/
}
break;
}
/* No buffers selected for writing? */
return (0);
}
/*
* Now start writing the buffers. We're starting at the write head
* and work backwards, retracing the course of the buffer selector
* loop above.
*/
/*
* We rely on the L1 portion of the header below, so
* it's invalid for this header to have been evicted out
* of the ghost cache, prior to being written out. The
* ARC_FLAG_L2_WRITING bit ensures this won't happen.
*/
/*
* We shouldn't need to lock the buffer here, since we flagged
* it as ARC_FLAG_L2_WRITING in the previous step, but we must
* take care to only access its L2 cache parameters. In
* particular, hdr->l1hdr.b_buf may be invalid by now due to
* ARC eviction.
*/
if ((HDR_L2COMPRESS(hdr)) &&
if (l2arc_compress_buf(hdr)) {
/*
* If compression succeeded, enable headroom
* boost on the next scan cycle.
*/
*headroom_boost = B_TRUE;
}
}
/*
* Pick up the buffer data we had previously stashed away
* (and now potentially also compressed).
*/
/* Compression may have squashed the buffer to zero length. */
if (buf_sz != 0) {
(void) zio_nowait(wzio);
write_asize += buf_sz;
/*
* Keep the clock hand suitably device-aligned.
*/
write_psize += buf_p_sz;
}
}
/*
* Bump device hand to the device start if it is approaching the end.
* l2arc_evict() will already have evicted ahead for this case.
*/
}
return (write_asize);
}
/*
* Compresses an L2ARC buffer.
* The data to be compressed must be prefilled in l1hdr.b_tmp_cdata and its
* size in l2hdr->b_asize. This routine tries to compress the data and
* depending on the compression result there are three possible outcomes:
* *) The buffer was incompressible. The original l2hdr contents were left
* untouched and are ready for writing to an L2 device.
* *) The buffer was all-zeros, so there is no need to write it to an L2
* device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
* set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
* *) Compression succeeded and b_tmp_cdata was replaced with a temporary
* data buffer which holds the compressed data to be written, and b_asize
* tells us how much data there is. b_compress is set to the appropriate
* compression algorithm. Once writing is done, invoke
* l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
*
* Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
* buffer was incompressible).
*/
static boolean_t
{
void *cdata;
}
if (csize == 0) {
/* zero block, indicate that there's nothing to write */
		return (B_TRUE);
	} else if (csize > 0 && csize < len) {
/*
* Compression succeeded, we'll keep the cdata around for
* writing and release it afterwards.
*/
return (B_TRUE);
} else {
/*
* Compression failed, release the compressed buffer.
* l2hdr will be left unmodified.
*/
return (B_FALSE);
}
}
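/*
 * A minimal sketch, not from the original source, of the three
 * outcomes documented above, assuming zio_compress_data() and the
 * zio_data_buf_alloc()/zio_data_buf_free() allocators; the sector
 * rounding and statistics kept by the full version are elided.
 */
static boolean_t
l2arc_compress_sketch(arc_buf_hdr_t *hdr)
{
	l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
	size_t len = l2hdr->b_asize;
	void *cdata = zio_data_buf_alloc(len);
	size_t csize = zio_compress_data(ZIO_COMPRESS_LZ4,
	    hdr->b_l1hdr.b_tmp_cdata, cdata, len);

	if (csize == 0) {
		/* all-zero block: nothing at all to write */
		zio_data_buf_free(cdata, len);
		l2hdr->b_compress = ZIO_COMPRESS_EMPTY;
		l2hdr->b_asize = 0;
		hdr->b_l1hdr.b_tmp_cdata = NULL;
		return (B_TRUE);
	}
	if (csize < len) {
		/* compressible: stash cdata for the write step */
		l2hdr->b_compress = ZIO_COMPRESS_LZ4;
		l2hdr->b_asize = csize;
		hdr->b_l1hdr.b_tmp_cdata = cdata;
		return (B_TRUE);
	}
	/* incompressible: leave the l2hdr untouched */
	zio_data_buf_free(cdata, len);
	return (B_FALSE);
}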
/*
* Decompresses a zio read back from an l2arc device. On success, the
* underlying zio's io_data buffer is overwritten by the uncompressed
* version. On decompression error (corrupt compressed stream), the
* zio->io_error value is set to signal an I/O error.
*
* Please note that the compressed data stream is not checksummed, so
* if the underlying device is experiencing data corruption, we may feed
* corrupt data to the decompressor, so the decompressor needs to be
* able to handle this situation (LZ4 does).
*/
static void
{
/*
		 * An I/O error has occurred; just restore the original I/O
		 * size in preparation for a main pool read.
*/
return;
}
if (c == ZIO_COMPRESS_EMPTY) {
/*
* An empty buffer results in a null zio, which means we
* need to fill its io_data after we're done restoring the
* buffer's contents.
*/
} else {
/*
* We copy the compressed data from the start of the arc buffer
* (the zio_read will have pulled in only what we need, the
* rest is garbage which we will overwrite at decompression)
* and then decompress back to the ARC data buffer. This way we
* can minimize copying by simply decompressing back over the
* original compressed data (rather than decompressing to an
* aux buffer and then copying back the uncompressed buffer,
* which is likely to be much larger).
*/
void *cdata;
}
/* Restore the expected uncompressed IO size. */
}
/*
* Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
* This buffer serves as a temporary holder of compressed data while
* the buffer entry is being written to an l2arc device. Once that is
* done, we can dispose of it.
*/
static void
{
if (comp == ZIO_COMPRESS_OFF) {
/*
* In this case, b_tmp_cdata points to the same buffer
* as the arc_buf_t's b_data field. We don't want to
* free it, since the arc_buf_t will handle that.
*/
} else if (comp == ZIO_COMPRESS_EMPTY) {
/*
* In this case, b_tmp_cdata was compressed to an empty
* buffer, thus there's nothing to free and b_tmp_cdata
* should have been set to NULL in l2arc_write_buffers().
*/
} else {
/*
* If the data was compressed, then we've allocated a
* temporary buffer for it, so now we need to release it.
*/
}
}
/*
* This thread feeds the L2ARC at regular intervals. This is the beating
* heart of the L2ARC.
*/
static void
l2arc_feed_thread(void)
{
while (l2arc_thread_exit == 0) {
next);
/*
* Quick check for L2ARC devices.
*/
if (l2arc_ndev == 0) {
continue;
}
begin = ddi_get_lbolt();
/*
* This selects the next l2arc device to write to, and in
* doing so the next spa to feed from: dev->l2ad_spa. This
* will return NULL if there are now no l2arc devices or if
* they are all faulted.
*
* If a device is returned, its spa's config lock is also
* held to prevent device removal. l2arc_dev_get_next()
* will grab and release l2arc_dev_mtx.
*/
continue;
/*
* If the pool is read-only then force the feed thread to
* sleep a little longer.
*/
if (!spa_writeable(spa)) {
continue;
}
/*
* Avoid contributing to memory pressure.
*/
if (arc_reclaim_needed()) {
continue;
}
size = l2arc_write_size();
/*
* Evict L2ARC buffers that will be overwritten.
*/
/*
* Write ARC buffers.
*/
/*
* Calculate interval between writes.
*/
}
l2arc_thread_exit = 0;
thread_exit();
}
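/*
 * A minimal sketch, not from the original source, of one cycle of the
 * loop above, using the helpers defined earlier in this section; CPR
 * handling, config-lock release, and the read-only and reclaim checks
 * are elided.
 */
static clock_t
l2arc_feed_cycle_sketch(boolean_t *headroom_boost)
{
	clock_t begin = ddi_get_lbolt();
	l2arc_dev_t *dev = l2arc_dev_get_next();
	uint64_t size, wrote;

	if (dev == NULL)	/* no devices: just sleep and retry */
		return (begin + hz);

	size = l2arc_write_size();

	/* clear space ahead of the write hand, then fill it */
	l2arc_evict(dev, size, B_FALSE);
	wrote = l2arc_write_buffers(dev->l2ad_spa, dev, size,
	    headroom_boost);

	/* idle back when little was written, speed up when busy */
	return (l2arc_write_interval(begin, size, wrote));
}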
{
break;
}
}
/*
* Add a vdev for use by the L2ARC. By this point the spa has already
* validated the vdev and opened it.
*/
void
{
/*
* Create a new l2arc device entry.
*/
/*
* This is a list of all ARC buffers that are still valid on the
* device.
*/
/*
* Add device to global list
*/
}
/*
* Remove a vdev from the L2ARC.
*/
void
{
/*
* Find the device by vdev
*/
break;
}
}
/*
* Remove device from global list
*/
/*
* Clear all buflists and ARC references. L2ARC device flush.
*/
}
void
l2arc_init(void)
{
l2arc_thread_exit = 0;
l2arc_ndev = 0;
l2arc_writes_sent = 0;
l2arc_writes_done = 0;
}
void
l2arc_fini(void)
{
/*
* This is called from dmu_fini(), which is called from spa_fini();
* Because of this, we can assume that all l2arc devices have
* already been removed when the pools themselves were removed.
*/
}
void
l2arc_start(void)
{
if (!(spa_mode_global & FWRITE))
return;
}
void
l2arc_stop(void)
{
if (!(spa_mode_global & FWRITE))
return;
l2arc_thread_exit = 1;
while (l2arc_thread_exit != 0)
}