diff --git a/inc/ocf.h b/inc/ocf.h index d52af719..06506caa 100644 --- a/inc/ocf.h +++ b/inc/ocf.h @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2022 Intel Corporation + * Copyright(c) 2026 Unvertical * SPDX-License-Identifier: BSD-3-Clause */ @@ -26,6 +27,7 @@ #include "cleaning/alru.h" #include "cleaning/acp.h" #include "promotion/nhit.h" +#include "prefetch/readahead.h" #include "ocf_metadata.h" #include "ocf_io_class.h" #include "ocf_stats.h" diff --git a/inc/ocf_mngt.h b/inc/ocf_mngt.h index 308cac49..1fc37b3f 100644 --- a/inc/ocf_mngt.h +++ b/inc/ocf_mngt.h @@ -1031,6 +1031,37 @@ int ocf_mngt_cache_prefetch_set_policy(ocf_cache_t cache, ocf_pf_mask_t mask); */ int ocf_mngt_cache_prefetch_get_policy(ocf_cache_t cache, ocf_pf_mask_t *mask); +/** + * @brief Set prefetch policy parameter in given cache + * + * @attention This changes only runtime state. To make changes persistent + * use function ocf_mngt_cache_save(). + * + * @param[in] cache Cache handle + * @param[in] pf_id Prefetch policy id + * @param[in] param_id Prefetch policy parameter id + * @param[in] param_value Prefetch policy parameter value + * + * @retval 0 Parameter has been set successfully + * @retval Non-zero Error occurred and parameter has not been set + */ +int ocf_mngt_cache_prefetch_set_param(ocf_cache_t cache, ocf_pf_id_t pf_id, + uint32_t param_id, uint32_t param_value); + +/** + * @brief Get prefetch policy parameter from given cache + * + * @param[in] cache Cache handle + * @param[in] pf_id Prefetch policy id + * @param[in] param_id Prefetch policy parameter id + * @param[out] param_value Variable to store parameter value + * + * @retval 0 Parameter has been get successfully + * @retval Non-zero Error occurred and parameter has not been get + */ +int ocf_mngt_cache_prefetch_get_param(ocf_cache_t cache, ocf_pf_id_t pf_id, + uint32_t param_id, uint32_t *param_value); + /** * @brief IO class configuration */ diff --git a/inc/prefetch/readahead.h b/inc/prefetch/readahead.h new file mode 100644 
index 00000000..25e56eee --- /dev/null +++ b/inc/prefetch/readahead.h @@ -0,0 +1,38 @@ +/* + * Copyright(c) 2026 Unvertical + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __OCF_PREFETCH_READAHEAD_H__ +#define __OCF_PREFETCH_READAHEAD_H__ + +/** + * @file + * @brief Readahead prefetch policy API + */ + +enum ocf_prefetch_readahead_parameters { + ocf_readahead_threshold, +}; + +/** + * @name Readahead prefetch policy parameters + * @{ + */ + +/** + * Readahead threshold - minimum sequential stream bytes before prefetching + */ + +/** Threshold minimum value (bytes) */ +#define OCF_PF_READAHEAD_MIN_THRESHOLD 0 +/** Threshold maximum value (bytes) */ +#define OCF_PF_READAHEAD_MAX_THRESHOLD 4294967295U +/** Threshold default value (bytes) */ +#define OCF_PF_READAHEAD_DEFAULT_THRESHOLD (64 * 1024) + +/** + * @} + */ + +#endif /* __OCF_PREFETCH_READAHEAD_H__ */ diff --git a/src/metadata/metadata_superblock.h b/src/metadata/metadata_superblock.h index 78d34762..f0eab0aa 100644 --- a/src/metadata/metadata_superblock.h +++ b/src/metadata/metadata_superblock.h @@ -12,6 +12,7 @@ #include "metadata_segment.h" #include "../promotion/promotion.h" #include "ocf/ocf_prefetch.h" +#include "../prefetch/ocf_prefetch_priv.h" #define CACHE_MAGIC_NUMBER 0x187E1CA6 @@ -56,6 +57,7 @@ struct ocf_superblock_config { struct promotion_policy_config promotion[PROMOTION_POLICY_TYPE_MAX]; ocf_pf_mask_t prefetch_mask; + struct prefetch_policy_config prefetch[PREFETCH_POLICY_TYPE_MAX]; /* * Checksum for each metadata region.
diff --git a/src/mngt/ocf_mngt_cache.c b/src/mngt/ocf_mngt_cache.c index 9bc2efa8..2ea64081 100644 --- a/src/mngt/ocf_mngt_cache.c +++ b/src/mngt/ocf_mngt_cache.c @@ -34,6 +34,7 @@ #include "../promotion/ops.h" #include "../concurrency/ocf_pio_concurrency.h" #include "../ocf_seq_cutoff.h" +#include "../prefetch/ocf_prefetch_priv.h" #define OCF_ASSERT_PLUGGED(cache) ENV_BUG_ON(!(cache)->device) @@ -402,6 +403,7 @@ static void _ocf_mngt_deinit_added_cores( ocf_volume_deinit(&core->front_volume); } + ocf_prefetch_deinit(cache, core); ocf_core_seq_cutoff_deinit(core); ocf_core_seq_detect_deinit(core); @@ -494,6 +496,8 @@ static void _ocf_mngt_load_add_cores(ocf_pipeline_t pipeline, goto err; ocf_core_seq_cutoff_init(core); + ocf_prefetch_init(cache, core); + if (!core->opened) { env_bit_set(ocf_cache_state_incomplete, &cache->cache_state); @@ -1582,6 +1586,7 @@ static void _ocf_mngt_cache_init(ocf_cache_t cache, cache->conf_meta->cache_mode = params->metadata.cache_mode; cache->conf_meta->promotion_policy_type = params->metadata.promotion_policy; cache->conf_meta->prefetch_mask = OCF_PF_MASK_DEFAULT; + ocf_prefetch_setup(cache); __set_cleaning_policy(cache, ocf_cleaning_default); /* Init Partitions */ @@ -3747,6 +3752,10 @@ int ocf_mngt_cache_promotion_set_param(ocf_cache_t cache, ocf_promotion_t type, int ocf_mngt_cache_prefetch_set_policy(ocf_cache_t cache, ocf_pf_mask_t mask) { ocf_pf_mask_t valid_mask = (1 << ocf_pf_num) - 1; + ocf_pf_mask_t old_mask; + ocf_pf_mask_t enabled, disabled; + ocf_core_t core; + ocf_core_id_t core_id; if (ocf_cache_is_standby(cache)) return -OCF_ERR_CACHE_STANDBY; @@ -3756,8 +3765,25 @@ int ocf_mngt_cache_prefetch_set_policy(ocf_cache_t cache, ocf_pf_mask_t mask) ocf_metadata_start_exclusive_access(&cache->metadata.lock); + old_mask = cache->conf_meta->prefetch_mask; cache->conf_meta->prefetch_mask = mask; + /* Newly enabled prefetchers need initialization */ + enabled = mask & ~old_mask; + /* Newly disabled prefetchers need 
deinitialization */ + disabled = old_mask & ~mask; + + if (enabled || disabled) { + for_each_core(cache, core, core_id) { + ocf_pf_id_t pf_id; + + for_each_pf_mask(pf_id, enabled) + ocf_prefetch_init_one(core, pf_id); + for_each_pf_mask(pf_id, disabled) + ocf_prefetch_deinit_one(core, pf_id); + } + } + ocf_metadata_end_exclusive_access(&cache->metadata.lock); return 0; @@ -3779,6 +3805,46 @@ int ocf_mngt_cache_prefetch_get_policy(ocf_cache_t cache, ocf_pf_mask_t *mask) return 0; } +int ocf_mngt_cache_prefetch_set_param(ocf_cache_t cache, ocf_pf_id_t pf_id, + uint32_t param_id, uint32_t param_value) +{ + int ret; + + OCF_CHECK_NULL(cache); + + if (!OCF_PF_ID_VALID(pf_id)) + return -OCF_ERR_INVAL; + + if (ocf_cache_is_standby(cache)) + return -OCF_ERR_CACHE_STANDBY; + + if (!ocf_cache_is_device_attached(cache)) + return -OCF_ERR_CACHE_DETACHED; + + ocf_metadata_start_exclusive_access(&cache->metadata.lock); + + ret = ocf_prefetch_set_param(cache, pf_id, param_id, param_value); + + ocf_metadata_end_exclusive_access(&cache->metadata.lock); + + return ret; +} + +int ocf_mngt_cache_prefetch_get_param(ocf_cache_t cache, ocf_pf_id_t pf_id, + uint32_t param_id, uint32_t *param_value) +{ + OCF_CHECK_NULL(cache); + OCF_CHECK_NULL(param_value); + + if (!OCF_PF_ID_VALID(pf_id)) + return -OCF_ERR_INVAL; + + if (ocf_cache_is_standby(cache)) + return -OCF_ERR_CACHE_STANDBY; + + return ocf_prefetch_get_param(cache, pf_id, param_id, param_value); +} + int ocf_mngt_cache_reset_fallback_pt_error_counter(ocf_cache_t cache) { OCF_CHECK_NULL(cache); diff --git a/src/mngt/ocf_mngt_common.c b/src/mngt/ocf_mngt_common.c index a152baf9..089bf537 100644 --- a/src/mngt/ocf_mngt_common.c +++ b/src/mngt/ocf_mngt_common.c @@ -19,6 +19,7 @@ #include "../ocf_queue_priv.h" #include "../engine/engine_common.h" #include "../ocf_seq_cutoff.h" +#include "../prefetch/ocf_prefetch_priv.h" /* Close if opened */ void cache_mngt_core_deinit(ocf_core_t core) @@ -132,6 +133,7 @@ void 
cache_mngt_core_remove_from_cache(ocf_core_t core) { ocf_cache_t cache = ocf_core_get_cache(core); + ocf_prefetch_deinit(cache, core); ocf_core_seq_cutoff_deinit(core); ocf_core_seq_detect_deinit(core); env_free(core->counters); diff --git a/src/mngt/ocf_mngt_core.c b/src/mngt/ocf_mngt_core.c index 690a0bcf..331a9cc4 100644 --- a/src/mngt/ocf_mngt_core.c +++ b/src/mngt/ocf_mngt_core.c @@ -17,6 +17,7 @@ #include "../ocf_def_priv.h" #include "../cleaning/cleaning_ops.h" #include "../ocf_seq_cutoff.h" +#include "../prefetch/ocf_prefetch_priv.h" ocf_seq_no_t ocf_mngt_get_core_seq_no(ocf_cache_t cache) { @@ -156,6 +157,7 @@ static void _ocf_mngt_cache_add_core_handle_error( if (context->flags.clean_pol_added) ocf_cleaning_remove_core(cache, core_id); + ocf_prefetch_deinit(cache, core); ocf_core_seq_cutoff_deinit(core); ocf_core_seq_detect_deinit(core); @@ -486,6 +488,7 @@ static void ocf_mngt_cache_add_core_insert(ocf_pipeline_t pipeline, /* Register seq detect consumers for default config */ ocf_core_seq_cutoff_init(core); + ocf_prefetch_init(cache, core); /* When adding new core to cache, allocate stat counters */ core->counters = diff --git a/src/ocf_core.c b/src/ocf_core.c index f01db5a1..60358971 100644 --- a/src/ocf_core.c +++ b/src/ocf_core.c @@ -306,8 +306,6 @@ static void ocf_core_volume_submit_io(ocf_io_t io) fastpath = ocf_core_submit_io_fast(req, cache); - ocf_core_seq_detect_update(core, req); - if (fastpath == OCF_FAST_PATH_YES) goto prefetch; @@ -322,6 +320,8 @@ static void ocf_core_volume_submit_io(ocf_io_t io) prefetch: ocf_prefetch(req); + ocf_core_seq_detect_update(core, req); + ocf_req_put(req); return; diff --git a/src/ocf_core_priv.h b/src/ocf_core_priv.h index 2565c7ce..8bb04a2e 100644 --- a/src/ocf_core_priv.h +++ b/src/ocf_core_priv.h @@ -13,6 +13,7 @@ #include "ocf_ctx_priv.h" #include "ocf_volume_priv.h" #include "ocf_seq_detect.h" +#include "ocf/ocf_prefetch.h" #define ocf_core_log_prefix(core, lvl, prefix, fmt, ...) 
\ ocf_cache_log_prefix(ocf_core_get_cache(core), lvl, ".%s" prefix, \ @@ -92,6 +93,7 @@ struct ocf_core { struct ocf_seq_detect *seq_detect; + void *pf_priv[ocf_pf_num]; bool seq_cutoff_active; env_atomic flushed; diff --git a/src/ocf_seq_detect.c b/src/ocf_seq_detect.c index 18d0e3a9..5ec43c7c 100644 --- a/src/ocf_seq_detect.c +++ b/src/ocf_seq_detect.c @@ -240,15 +240,13 @@ void ocf_core_seq_detect_update(ocf_core_t core, struct ocf_request *req) ocf_seq_detect_sync_config(queue_sd, core); ocf_seq_detect_sync_config(core_sd, core); - if (req->seq_cutoff_core) { - env_rwlock_write_lock(&core_sd->lock); - stream = ocf_seq_detect_update(core_sd, - req->addr, req->bytes, req->rw, false); - env_rwlock_write_unlock(&core_sd->lock); + env_rwlock_write_lock(&core_sd->lock); + stream = ocf_seq_detect_update(core_sd, + req->addr, req->bytes, req->rw, false); + env_rwlock_write_unlock(&core_sd->lock); - if (stream) - return; - } + if (stream) + return; env_rwlock_write_lock(&queue_sd->lock); stream = ocf_seq_detect_update(queue_sd, diff --git a/src/prefetch/ocf_prefetch.c b/src/prefetch/ocf_prefetch.c index cb0901dd..ba20469e 100644 --- a/src/prefetch/ocf_prefetch.c +++ b/src/prefetch/ocf_prefetch.c @@ -12,15 +12,27 @@ #include "../engine/engine_prefetch.h" #include "ocf_env.h" #include "ocf_prefetch_priv.h" -#include "ocf_prefetch_readahead.h" +#include "ocf_prefetch_readahead_priv.h" struct ocf_pf_ops { + void (*setup)(ocf_cache_t cache); + int (*init)(ocf_core_t core); + void (*deinit)(ocf_core_t core); void (*get_range)(struct ocf_request *req, struct ocf_pf_range *range); + int (*set_param)(ocf_cache_t cache, uint32_t param_id, + uint32_t param_value); + int (*get_param)(ocf_cache_t cache, uint32_t param_id, + uint32_t *param_value); }; static struct ocf_pf_ops ocf_pf_ops[ocf_pf_num] = { [ocf_pf_readahead] = { + .setup = ocf_pf_readahead_setup, + .init = ocf_pf_readahead_init, + .deinit = ocf_pf_readahead_deinit, .get_range = ocf_pf_readahead_get_range, + .set_param = 
ocf_pf_readahead_set_param, + .get_param = ocf_pf_readahead_get_param, }, }; @@ -114,6 +126,72 @@ static void ocf_prefetch_range(struct ocf_request *req, ocf_pf_id_t pf_id, } } +void ocf_prefetch_setup(ocf_cache_t cache) +{ + ocf_pf_id_t pf_id; + + for_each_pf(pf_id) { + if (ocf_pf_ops[pf_id].setup) + ocf_pf_ops[pf_id].setup(cache); + } +} + +int ocf_prefetch_set_param(ocf_cache_t cache, ocf_pf_id_t pf_id, + uint32_t param_id, uint32_t param_value) +{ + ENV_BUG_ON(!OCF_PF_ID_VALID(pf_id)); + + if (!ocf_pf_ops[pf_id].set_param) + return -OCF_ERR_INVAL; + + return ocf_pf_ops[pf_id].set_param(cache, param_id, param_value); +} + +int ocf_prefetch_get_param(ocf_cache_t cache, ocf_pf_id_t pf_id, + uint32_t param_id, uint32_t *param_value) +{ + ENV_BUG_ON(!OCF_PF_ID_VALID(pf_id)); + + if (!ocf_pf_ops[pf_id].get_param) + return -OCF_ERR_INVAL; + + return ocf_pf_ops[pf_id].get_param(cache, param_id, param_value); +} + +void ocf_prefetch_init(ocf_cache_t cache, ocf_core_t core) +{ + ocf_pf_mask_t pf_mask = cache->conf_meta->prefetch_mask; + ocf_pf_id_t pf_id; + + for_each_pf_mask(pf_id, pf_mask) { + if (ocf_pf_ops[pf_id].init) + ocf_pf_ops[pf_id].init(core); + } +} + +void ocf_prefetch_init_one(ocf_core_t core, ocf_pf_id_t pf_id) +{ + if (ocf_pf_ops[pf_id].init) + ocf_pf_ops[pf_id].init(core); +} + +void ocf_prefetch_deinit_one(ocf_core_t core, ocf_pf_id_t pf_id) +{ + if (ocf_pf_ops[pf_id].deinit) + ocf_pf_ops[pf_id].deinit(core); +} + +void ocf_prefetch_deinit(ocf_cache_t cache, ocf_core_t core) +{ + ocf_pf_mask_t pf_mask = cache->conf_meta->prefetch_mask; + ocf_pf_id_t pf_id; + + for_each_pf_mask(pf_id, pf_mask) { + if (ocf_pf_ops[pf_id].deinit) + ocf_pf_ops[pf_id].deinit(core); + } +} + void ocf_prefetch(struct ocf_request *req) { ocf_pf_mask_t pf_mask = req->cache->conf_meta->prefetch_mask; @@ -123,6 +201,14 @@ void ocf_prefetch(struct ocf_request *req) if (req->rw != OCF_READ) return; + switch (req->cache_mode) { + case ocf_req_cache_mode_pt: + case 
ocf_req_cache_mode_wo: + return; + default: + break; + } + for_each_pf_mask(pf_id, pf_mask) ocf_pf_ops[pf_id].get_range(req, &ranges[pf_id]); diff --git a/src/prefetch/ocf_prefetch_priv.h b/src/prefetch/ocf_prefetch_priv.h index 76a2e3d7..2a3d9bd2 100644 --- a/src/prefetch/ocf_prefetch_priv.h +++ b/src/prefetch/ocf_prefetch_priv.h @@ -14,6 +14,13 @@ #define OCF_PF_MAX_TOTAL (8 * MiB) +#define PREFETCH_POLICY_CONFIG_BYTES 256 +#define PREFETCH_POLICY_TYPE_MAX ((int)ocf_pf_num) + +struct prefetch_policy_config { + uint8_t data[PREFETCH_POLICY_CONFIG_BYTES]; +}; + #define OCF_PF_ID_VALID(pf_id) ((pf_id) != ocf_pf_none && (pf_id) < ocf_pf_num) #define OCF_PF_ID_ENABLED(pf_id, enabled_mask) ((1 << ((pf_id))) & enabled_mask) @@ -30,5 +37,14 @@ struct ocf_pf_range { }; void ocf_prefetch(struct ocf_request *req); +void ocf_prefetch_setup(ocf_cache_t cache); +void ocf_prefetch_init(ocf_cache_t cache, ocf_core_t core); +void ocf_prefetch_deinit(ocf_cache_t cache, ocf_core_t core); +void ocf_prefetch_init_one(ocf_core_t core, ocf_pf_id_t pf_id); +void ocf_prefetch_deinit_one(ocf_core_t core, ocf_pf_id_t pf_id); +int ocf_prefetch_set_param(ocf_cache_t cache, ocf_pf_id_t pf_id, + uint32_t param_id, uint32_t param_value); +int ocf_prefetch_get_param(ocf_cache_t cache, ocf_pf_id_t pf_id, + uint32_t param_id, uint32_t *param_value); #endif /* __OCF_PREFETCH_PRIV_H__ */ diff --git a/src/prefetch/ocf_prefetch_readahead.c b/src/prefetch/ocf_prefetch_readahead.c index 969326ce..1dbed5a4 100644 --- a/src/prefetch/ocf_prefetch_readahead.c +++ b/src/prefetch/ocf_prefetch_readahead.c @@ -4,22 +4,126 @@ * SPDX-License-Identifier: BSD-3-Clause */ -#include "ocf_prefetch_readahead.h" +#include "ocf_prefetch_readahead_priv.h" +#include "../ocf_cache_priv.h" +#include "../ocf_core_priv.h" +#include "../ocf_queue_priv.h" +#include "../ocf_seq_detect.h" #include "../utils/utils_cache_line.h" #include "ocf/ocf_def.h" +#include "ocf/prefetch/readahead.h" #define OCF_PF_READAHEAD_MIN (64 * KiB) -/* 
- * NOTE: This simplistic implementation is meant to serve as an reference - * implementation for other prefetch policies. In the current form - * it's not expected to bring any performance improvements for most - * of the workloads (actually it's expected to cause performance - * degratation in most cases). - */ +static inline struct readahead_prefetch_policy_config * +ocf_pf_readahead_config(ocf_cache_t cache) +{ + return (void *)&cache->conf_meta->prefetch[ocf_pf_readahead].data; +} + +void ocf_pf_readahead_setup(ocf_cache_t cache) +{ + struct readahead_prefetch_policy_config *config; + + config = ocf_pf_readahead_config(cache); + config->threshold = OCF_PF_READAHEAD_DEFAULT_THRESHOLD; +} + +int ocf_pf_readahead_set_param(ocf_cache_t cache, uint32_t param_id, + uint32_t param_value) +{ + struct readahead_prefetch_policy_config *config; + + config = ocf_pf_readahead_config(cache); + + switch (param_id) { + case ocf_readahead_threshold: + if (param_value < OCF_PF_READAHEAD_MIN_THRESHOLD || + param_value > OCF_PF_READAHEAD_MAX_THRESHOLD) { + ocf_cache_log(cache, log_err, "Refusing setting " + "prefetch parameter because threshold is " + "not within range of <%u-%u>\n", + OCF_PF_READAHEAD_MIN_THRESHOLD, + OCF_PF_READAHEAD_MAX_THRESHOLD); + return -OCF_ERR_INVAL; + } + config->threshold = param_value; + ocf_cache_log(cache, log_info, "Readahead prefetch " + "threshold: %u\n", config->threshold); + break; + default: + return -OCF_ERR_INVAL; + } + + return 0; +} + +int ocf_pf_readahead_get_param(ocf_cache_t cache, uint32_t param_id, + uint32_t *param_value) +{ + struct readahead_prefetch_policy_config *config; + + config = ocf_pf_readahead_config(cache); + + switch (param_id) { + case ocf_readahead_threshold: + *param_value = config->threshold; + break; + default: + return -OCF_ERR_INVAL; + } + + return 0; +} + +int ocf_pf_readahead_init(ocf_core_t core) +{ + ocf_seq_detect_register_consumer(core->seq_detect); + core->pf_priv[ocf_pf_readahead] = core; + return 0; +} + 
+void ocf_pf_readahead_deinit(ocf_core_t core) +{ + if (!core->pf_priv[ocf_pf_readahead]) + return; + + ocf_seq_detect_unregister_consumer(core->seq_detect); + core->pf_priv[ocf_pf_readahead] = NULL; +} + void ocf_pf_readahead_get_range(struct ocf_request *req, struct ocf_pf_range *range) { + struct ocf_seq_detect *queue_sd = req->io_queue->seq_detect; + struct ocf_seq_detect *core_sd = req->core->seq_detect; + struct ocf_seq_detect_stream *stream; + uint64_t bytes = 0; + + range->core_line_first = 0; + range->core_line_count = 0; + + /* Check queue-level detector first, then core-level */ + env_rwlock_read_lock(&queue_sd->lock); + stream = ocf_seq_detect_find(queue_sd, req->addr, req->rw); + if (stream) + bytes = stream->bytes; + env_rwlock_read_unlock(&queue_sd->lock); + + if (!stream) { + env_rwlock_read_lock(&core_sd->lock); + stream = ocf_seq_detect_find(core_sd, req->addr, req->rw); + if (stream) + bytes = stream->bytes; + env_rwlock_read_unlock(&core_sd->lock); + } + + if (!stream) + return; + + if (bytes < ocf_pf_readahead_config(req->cache)->threshold) + return; + range->core_line_first = req->core_line_first + req->core_line_count; range->core_line_count = OCF_MAX(req->core_line_count, ocf_bytes_2_lines(req->cache, OCF_PF_READAHEAD_MIN)); diff --git a/src/prefetch/ocf_prefetch_readahead.h b/src/prefetch/ocf_prefetch_readahead.h deleted file mode 100644 index 6abbb642..00000000 --- a/src/prefetch/ocf_prefetch_readahead.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright(c) 2022-2024 Huawei Technologies - * Copyright(c) 2026 Unvertical - * SPDX-License-Identifier: BSD-3-Clause - */ - -#ifndef __OCF_PREFETCH_READAHEAD_H__ -#define __OCF_PREFETCH_READAHEAD_H__ - -#include "ocf_prefetch_priv.h" -#include "ocf/ocf_types.h" - -void ocf_pf_readahead_get_range(struct ocf_request *req, - struct ocf_pf_range *range); - -#endif /* __OCF_PREFETCH_READAHEAD_H__ */ diff --git a/src/prefetch/ocf_prefetch_readahead_priv.h b/src/prefetch/ocf_prefetch_readahead_priv.h new 
file mode 100644 index 00000000..06d56215 --- /dev/null +++ b/src/prefetch/ocf_prefetch_readahead_priv.h @@ -0,0 +1,28 @@ +/* + * Copyright(c) 2022-2024 Huawei Technologies + * Copyright(c) 2026 Unvertical + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __OCF_PREFETCH_READAHEAD_PRIV_H__ +#define __OCF_PREFETCH_READAHEAD_PRIV_H__ + +#include "ocf_prefetch_priv.h" +#include "ocf/ocf_types.h" +#include "ocf_env.h" + +struct readahead_prefetch_policy_config { + uint32_t threshold; /* in bytes */ +}; + +void ocf_pf_readahead_setup(ocf_cache_t cache); +int ocf_pf_readahead_init(ocf_core_t core); +void ocf_pf_readahead_deinit(ocf_core_t core); +void ocf_pf_readahead_get_range(struct ocf_request *req, + struct ocf_pf_range *range); +int ocf_pf_readahead_set_param(ocf_cache_t cache, uint32_t param_id, + uint32_t param_value); +int ocf_pf_readahead_get_param(ocf_cache_t cache, uint32_t param_id, + uint32_t *param_value); + +#endif /* __OCF_PREFETCH_READAHEAD_PRIV_H__ */ diff --git a/tests/functional/pyocf/types/cache.py b/tests/functional/pyocf/types/cache.py index 75375b79..7a1019b5 100644 --- a/tests/functional/pyocf/types/cache.py +++ b/tests/functional/pyocf/types/cache.py @@ -196,6 +196,20 @@ def __str__(self): return self.name +class PrefetchPolicy(IntEnum): + READAHEAD = 0 + + def __str__(self): + return self.name + + +class ReadaheadParams(IntEnum): + THRESHOLD = 0 + + def __str__(self): + return self.name + + class CleaningPolicy(IntEnum): NOP = 0 ALRU = 1 @@ -471,6 +485,32 @@ def get_prefetch_policy(self): return mask.value + def set_prefetch_param(self, pf_id, param_id, param_value): + self.write_lock() + + status = self.owner.lib.ocf_mngt_cache_prefetch_set_param( + self.cache_handle, pf_id, param_id, param_value + ) + + self.write_unlock() + if status: + raise OcfError("Error setting prefetch parameter", status) + + def get_prefetch_param(self, pf_id, param_id): + self.read_lock() + + param_value = c_uint32() + + status = 
self.owner.lib.ocf_mngt_cache_prefetch_get_param( + self.cache_handle, pf_id, param_id, byref(param_value) + ) + + self.read_unlock() + if status: + raise OcfError("Error getting prefetch parameter", status) + + return param_value.value + def set_seq_cut_off_policy(self, policy: SeqCutOffPolicy): self.write_lock() @@ -1243,6 +1283,10 @@ def get_by_name(cache_name, owner=None): lib.ocf_mngt_cache_prefetch_set_policy.restype = c_int lib.ocf_mngt_cache_prefetch_get_policy.argtypes = [c_void_p, POINTER(c_uint8)] lib.ocf_mngt_cache_prefetch_get_policy.restype = c_int +lib.ocf_mngt_cache_prefetch_set_param.argtypes = [c_void_p, c_uint32, c_uint32, c_uint32] +lib.ocf_mngt_cache_prefetch_set_param.restype = c_int +lib.ocf_mngt_cache_prefetch_get_param.argtypes = [c_void_p, c_uint32, c_uint32, POINTER(c_uint32)] +lib.ocf_mngt_cache_prefetch_get_param.restype = c_int lib.ocf_cache_io_class_get_info.restype = c_int lib.ocf_cache_io_class_get_info.argtypes = [c_void_p, c_uint32, c_void_p] lib.ocf_mngt_add_partition_to_cache.restype = c_int diff --git a/tests/functional/tests/engine/test_prefetch_readahead.py b/tests/functional/tests/engine/test_prefetch_readahead.py new file mode 100644 index 00000000..1272db65 --- /dev/null +++ b/tests/functional/tests/engine/test_prefetch_readahead.py @@ -0,0 +1,213 @@ +# +# Copyright(c) 2026 Unvertical +# SPDX-License-Identifier: BSD-3-Clause +# + +import math +import pytest + +from pyocf.types.cache import Cache, CacheMode, PrefetchPolicy, ReadaheadParams +from pyocf.types.core import Core +from pyocf.types.volume import RamVolume +from pyocf.types.volume_core import CoreVolume +from pyocf.utils import Size +from pyocf.types.shared import CacheLineSize, SeqCutOffPolicy +from pyocf.rio import Rio, ReadWrite +from pyocf.helpers import is_block_size_4k + + +PF_READAHEAD_MASK = 1 << PrefetchPolicy.READAHEAD +PF_READAHEAD_MIN = Size.from_KiB(64) +STATS_BLOCK_SIZE = Size.from_KiB(4) + + +def setup_cache_core(pyocf_ctx, 
cache_line_size=CacheLineSize.DEFAULT): + cache_device = RamVolume(Size.from_MiB(50)) + core_device = RamVolume(Size.from_MiB(100)) + + cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WT, + cache_line_size=cache_line_size) + core = Core.using_device(core_device) + cache.add_core(core) + + cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER) + + vol = CoreVolume(core) + queue = cache.get_default_queue() + + return cache, core, vol, queue + + +def get_prefetch_count(cache): + stats = cache.get_stats() + return stats["req"]["prefetch"][0].value + + +def get_prefetch_blocks(cache): + stats = cache.get_stats() + return { + "core_rd": stats["block"]["prefetch_core_rd"][0].value, + "cache_wr": stats["block"]["prefetch_cache_wr"][0].value, + } + + +def test_prefetch_policy_default(pyocf_ctx): + """Check that the default prefetch policy mask is 0 (all disabled).""" + cache_device = RamVolume(Size.from_MiB(50)) + cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WT) + + assert cache.get_prefetch_policy() == 0 + + +def test_prefetch_readahead_params_default(pyocf_ctx): + """Check that the default readahead threshold is 64 KiB.""" + cache_device = RamVolume(Size.from_MiB(50)) + cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WT) + + val = cache.get_prefetch_param(PrefetchPolicy.READAHEAD, + ReadaheadParams.THRESHOLD) + assert val == int(Size.from_KiB(64)) + + +@pytest.mark.parametrize("threshold", [ + Size(0), Size.from_KiB(16), Size.from_KiB(64), + Size.from_KiB(256), Size.from_MiB(4), +]) +@pytest.mark.parametrize("req_size", [ + Size(512), Size.from_KiB(4), Size.from_KiB(64), Size.from_MiB(1), +]) +@pytest.mark.parametrize("cls", [CacheLineSize.LINE_4KiB, CacheLineSize.LINE_64KiB]) +def test_prefetch_readahead_threshold(pyocf_ctx, threshold, req_size, cls): + """ + Submit sequential I/O just below the readahead threshold and verify + that prefetch has not triggered. 
Then submit one more request and + verify that prefetch triggers with expected request and block stats. + """ + if is_block_size_4k() and req_size < Size.from_KiB(4): + pytest.skip("Sub-4K I/O not supported in 4K block mode") + + cache, core, vol, queue = setup_cache_core(pyocf_ctx, cache_line_size=cls) + + cache.set_prefetch_policy(PF_READAHEAD_MASK) + cache.set_prefetch_param(PrefetchPolicy.READAHEAD, + ReadaheadParams.THRESHOLD, int(threshold)) + + # Readahead sees bytes from previous requests. After N requests it + # sees (N-1)*req_size bytes. Submit max(1, ceil(threshold/req_size)) + # requests to stay below threshold. + below_count = max(1, math.ceil(int(threshold) / int(req_size))) + below_size = Size(below_count * int(req_size)) + + rio = (Rio().target(vol).bs(req_size).size(below_size) + .readwrite(ReadWrite.READ)) + rio.run([queue]) + queue.settle() + + assert get_prefetch_count(cache) == 0, \ + "No prefetch should trigger below threshold" + assert get_prefetch_blocks(cache) == {"core_rd": 0, "cache_wr": 0}, \ + "No prefetch blocks should be generated below threshold" + + # Submit one more request - this one should trigger prefetch + rio = (Rio().target(vol).bs(req_size).size(req_size).offset(below_size) + .readwrite(ReadWrite.READ)) + rio.run([queue]) + queue.settle() + + assert get_prefetch_count(cache) == 1, \ + "Exactly one prefetch request should be generated" + + # Prefetch range = max(request cache lines, 64KiB worth of cache lines) + req_lines = math.ceil(int(req_size) / cls) + prefetch_lines = max(req_lines, int(PF_READAHEAD_MIN) // cls) + # Block stats are reported in 4KiB units + expected_blocks = prefetch_lines * cls // int(STATS_BLOCK_SIZE) + + blocks = get_prefetch_blocks(cache) + assert blocks["core_rd"] == expected_blocks, \ + f"Expected {expected_blocks} 4KiB blocks read from core" + assert blocks["cache_wr"] == expected_blocks, \ + f"Expected {expected_blocks} 4KiB blocks written to cache" + + +@pytest.mark.parametrize("cls", 
[CacheLineSize.LINE_4KiB, CacheLineSize.LINE_64KiB]) +def test_prefetch_readahead_random(pyocf_ctx, cls): + """ + Verify that random reads do not trigger prefetch. + """ + cache, core, vol, queue = setup_cache_core(pyocf_ctx, cache_line_size=cls) + + cache.set_prefetch_policy(PF_READAHEAD_MASK) + + rio = (Rio().target(vol).bs(Size.from_KiB(4)).size(Size.from_MiB(20)) + .readwrite(ReadWrite.RANDREAD)) + rio.run([queue]) + queue.settle() + + assert get_prefetch_count(cache) == 0, \ + "No prefetch should trigger for random reads" + + +@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WB, + CacheMode.WA, CacheMode.WI]) +@pytest.mark.parametrize("cls", [CacheLineSize.LINE_4KiB, CacheLineSize.LINE_64KiB]) +def test_prefetch_readahead_read_insert(pyocf_ctx, cache_mode, cls): + """ + Verify that prefetch triggers for all cache modes that insert reads + (WT, WB, WA, WI). + """ + cache_device = RamVolume(Size.from_MiB(50)) + core_device = RamVolume(Size.from_MiB(100)) + + cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, + cache_line_size=cls) + core = Core.using_device(core_device) + cache.add_core(core) + + cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER) + cache.set_prefetch_policy(PF_READAHEAD_MASK) + cache.set_prefetch_param(PrefetchPolicy.READAHEAD, + ReadaheadParams.THRESHOLD, 0) + + vol = CoreVolume(core) + queue = cache.get_default_queue() + + rio = (Rio().target(vol).bs(Size.from_KiB(4)).size(Size.from_MiB(1)) + .readwrite(ReadWrite.READ)) + rio.run([queue]) + queue.settle() + + assert get_prefetch_count(cache) > 0, \ + f"Prefetch should trigger in {cache_mode.name} mode" + + +@pytest.mark.parametrize("cache_mode", [CacheMode.PT, CacheMode.WO]) +@pytest.mark.parametrize("cls", [CacheLineSize.LINE_4KiB, CacheLineSize.LINE_64KiB]) +def test_prefetch_readahead_no_read_insert(pyocf_ctx, cache_mode, cls): + """ + Verify that prefetch does not trigger when the effective cache mode + does not insert reads (PT, WO). 
+ """ + cache_device = RamVolume(Size.from_MiB(50)) + core_device = RamVolume(Size.from_MiB(100)) + + cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, + cache_line_size=cls) + core = Core.using_device(core_device) + cache.add_core(core) + + cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER) + cache.set_prefetch_policy(PF_READAHEAD_MASK) + cache.set_prefetch_param(PrefetchPolicy.READAHEAD, + ReadaheadParams.THRESHOLD, 0) + + vol = CoreVolume(core) + queue = cache.get_default_queue() + + rio = (Rio().target(vol).bs(Size.from_KiB(4)).size(Size.from_MiB(1)) + .readwrite(ReadWrite.READ)) + rio.run([queue]) + queue.settle() + + assert get_prefetch_count(cache) == 0, \ + f"No prefetch should trigger in {cache_mode.name} mode"