diff --git a/app/no_auth_llext_overlay.conf b/app/no_auth_llext_overlay.conf new file mode 100644 index 000000000000..50ec2c7a41b2 --- /dev/null +++ b/app/no_auth_llext_overlay.conf @@ -0,0 +1,3 @@ +# Disable library authentication for testing on hardware without OTC key support +# This allows loading community-signed llext libraries without CSE authentication +CONFIG_LIBRARY_AUTH_SUPPORT=n diff --git a/app/shell_overlay.conf b/app/shell_overlay.conf index 963acd1d6e60..319857a70ba5 100644 --- a/app/shell_overlay.conf +++ b/app/shell_overlay.conf @@ -1,7 +1,7 @@ CONFIG_SHELL=y CONFIG_SHELL_HELP=y CONFIG_SHELL_CMDS=y -CONFIG_SHELL_LOG_BACKEND=n +CONFIG_SHELL_LOG_BACKEND=y CONFIG_SHELL_AUTOSTART=y CONFIG_SHELL_BACKEND_ADSP_MEMORY_WINDOW=y @@ -18,3 +18,10 @@ CONFIG_WINSTREAM_CONSOLE=y # these must be disabled in order to use the console. CONFIG_SOF_TELEMETRY_PERFORMANCE_MEASUREMENTS=n CONFIG_SOF_TELEMETRY_IO_PERFORMANCE_MEASUREMENTS=n +CONFIG_SOF_SHELL_LLEXT_LOAD=y +CONFIG_SOF_SHELL_LLEXT_LIST=y +CONFIG_SOF_SHELL_LLEXT_PURGE=y +CONFIG_SOF_SHELL_CORE_POWER=y + +# Disable REBOOT since qemu_xtensa does not implement sys_arch_reboot +CONFIG_REBOOT=n diff --git a/app/src/main.c b/app/src/main.c index 12cc3ffdc73a..d115780d97dd 100644 --- a/app/src/main.c +++ b/app/src/main.c @@ -79,8 +79,12 @@ void test_main(void) sof_app_main(); #if CONFIG_SOF_BOOT_TEST && defined(QEMU_BOOT_TESTS) sof_run_boot_tests(); +#ifdef CONFIG_SHELL + k_sleep(K_FOREVER); +#else qemu_xtensa_exit(0); #endif +#endif } #else int main(void) diff --git a/scripts/sof-qemu-run.py b/scripts/sof-qemu-run.py index fc985e254d51..6552626352e9 100755 --- a/scripts/sof-qemu-run.py +++ b/scripts/sof-qemu-run.py @@ -148,6 +148,18 @@ def main(): crashed = check_for_crash(full_output) + shell_enabled = False + config_file = os.path.join(build_dir, "zephyr", ".config") + if not os.path.isfile(config_file): + config_file = os.path.join("zephyr", ".config") + + if os.path.isfile(config_file): + with open(config_file, 
"r") as f: + for line in f: + if line.strip() == "CONFIG_SHELL=y": + shell_enabled = True + break + if crashed: print("\n[sof-qemu-run] Detected crash signature in standard output!") # Stop QEMU if it's still running @@ -156,6 +168,14 @@ def main(): child.close(force=True) run_sof_crash_decode(build_dir, full_output) + elif shell_enabled: + print("\n[sof-qemu-run] Shell is enabled. Entering interactive mode...") + if child.isalive(): + try: + child.interact() + except Exception as e: + print(f"Error during interaction: {e}") + child.close(force=True) else: print("\n[sof-qemu-run] No crash detected. Interacting with QEMU Monitor to grab registers...") diff --git a/scripts/sof-qemu-run.sh b/scripts/sof-qemu-run.sh index e1ece1dd5125..9648b1eb711e 100755 --- a/scripts/sof-qemu-run.sh +++ b/scripts/sof-qemu-run.sh @@ -2,8 +2,25 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2026 Intel Corporation. All rights reserved. -# Define the build directory from the first argument (or default) -BUILD_DIR="${1:-build}" +# Parse arguments to determine the build directory +BUILD_DIR="build" + +while [[ $# -gt 0 ]]; do + case "$1" in + --build-dir) + BUILD_DIR="${2:?--build-dir requires a value}" + shift 2 + ;; + -*) + echo "Unknown option $1" + exit 1 + ;; + *) + BUILD_DIR="$1" + shift + ;; + esac +done # Find and source the zephyr environment script, typically via the sof-venv wrapper # or directly if running in a known zephyrproject layout. @@ -24,5 +41,5 @@ source ${VENV_DIR}/bin/activate cd "${BUILD_DIR}" || exit 1 # Finally run the python script which will now correctly inherit 'west' from the sourced environment. -python3 "${SCRIPT_DIR}/sof-qemu-run.py" --build-dir "${BUILD_DIR}" +python3 "${SCRIPT_DIR}/sof-qemu-run.py" --build-dir . 
diff --git a/src/include/sof/ipc/common.h b/src/include/sof/ipc/common.h index e46fc10b9521..fab826557849 100644 --- a/src/include/sof/ipc/common.h +++ b/src/include/sof/ipc/common.h @@ -228,6 +228,66 @@ struct ipc_cmd_hdr *mailbox_validate(void); */ void ipc_cmd(struct ipc_cmd_hdr *_hdr); +/** + * \brief Lightweight IPC counters and last-message snapshot for diagnostics. + * + * Populated by the platform RX/TX hooks. Safe to read from any context; + * fields are 32-bit and updated under @ref ipc::lock when written. + */ +struct ipc_stats { + uint32_t rx_count; /**< total IPC messages received */ + uint32_t tx_count; /**< total IPC messages sent */ + uint32_t tx_direct_count; /**< messages sent via the direct path */ + uint32_t rx_errors; /**< RX path errors / unknown targets */ + uint32_t tx_errors; /**< TX path send failures */ + + /* last RX */ + uint32_t last_rx_pri; + uint32_t last_rx_ext; + uint64_t last_rx_time; /**< platform cycles */ + + /* last TX */ + uint32_t last_tx_pri; + uint32_t last_tx_ext; + uint64_t last_tx_time; +}; + +/** + * \brief Record an inbound IPC for the stats snapshot. + * + * @param[in] pri Primary header word. + * @param[in] ext Extension header word. + */ +void ipc_stats_record_rx(uint32_t pri, uint32_t ext); + +/** + * \brief Record an outbound IPC for the stats snapshot. + * + * @param[in] pri Primary header word. + * @param[in] ext Extension header word. + * @param[in] direct True if the message used the "direct" send path. + * @param[in] err Send result (negative on error). + */ +void ipc_stats_record_tx(uint32_t pri, uint32_t ext, bool direct, int err); + +/** + * \brief Increment the RX error counter without updating the last-message + * snapshot. Used for unknown targets / dispatch failures. + */ +void ipc_stats_inc_rx_error(void); + +/** + * \brief Read a copy of the current IPC statistics. + * + * @param[out] out Destination snapshot. 
+ */ +void ipc_stats_get(struct ipc_stats *out); + +/** + * \brief Reset all IPC statistics counters. + */ +void ipc_stats_reset(void); + /** * \brief IPC message to be processed on other core. * @param[in] core Core id for IPC to be processed on. diff --git a/src/include/sof/lib/vregion.h b/src/include/sof/lib/vregion.h index 612443f5bc48..1ad0e0deae41 100644 --- a/src/include/sof/lib/vregion.h +++ b/src/include/sof/lib/vregion.h @@ -7,6 +7,8 @@ #include +struct shell; + #ifdef __cplusplus extern "C" { #endif @@ -123,6 +125,13 @@ void vregion_info(struct vregion *vr); */ void vregion_mem_info(struct vregion *vr, size_t *size, uintptr_t *start); +/** + * @brief Dump all virtual regions info + * + * @param[in] sh Shell context to print to. + */ +void vregion_info_all(const struct shell *sh); + #else /* CONFIG_SOF_VREGIONS */ #include @@ -176,6 +185,7 @@ static inline void vregion_mem_info(struct vregion *vr, size_t *size, uintptr_t if (size) *size = 0; } +static inline void vregion_info_all(const struct shell *sh) {} #endif /* CONFIG_SOF_VREGIONS */ diff --git a/src/include/sof/lib_manager.h b/src/include/sof/lib_manager.h index 29c226eb61a7..6011f1b717f6 100644 --- a/src/include/sof/lib_manager.h +++ b/src/include/sof/lib_manager.h @@ -250,4 +250,19 @@ void lib_notif_msg_send(struct ipc_msg *msg); */ void lib_notif_msg_clean(bool leave_one_handle); +/** + * \brief Purge (free) a loadable library from IMR/DRAM storage + * + * \param[in] lib_id - library slot id (1 .. LIB_MANAGER_MAX_LIBS-1) + * + * Removes the library binary from DRAM/IMR and releases the + * lib_manager_mod_ctx entry so that the slot can be reused by a + * future LOAD_LIBRARY call. Returns -EBUSY if any module file from + * the library is still mapped in SRAM (i.e., has active instances + * or is still linked as a dependency). + * + * Return: 0 on success, negative errno on error. 
+ */ +int lib_manager_purge_library(uint32_t lib_id); + #endif /* __SOF_LIB_MANAGER_H__ */ diff --git a/src/include/sof/schedule/schedule.h b/src/include/sof/schedule/schedule.h index bbdcbbecf3b4..4f7458aed8d3 100644 --- a/src/include/sof/schedule/schedule.h +++ b/src/include/sof/schedule/schedule.h @@ -158,6 +158,21 @@ struct scheduler_ops { * This operation is optional. */ int (*scheduler_restore)(void *data); + + /** + * Iterate over all tasks owned by the scheduler and invoke @p cb on each. + * The scheduler is responsible for taking its own lock around the walk. + * @param data Private data of selected scheduler. + * @param cb Callback called once per task; must not block. + * @param ctx Opaque context passed to @p cb. + * + * This operation is optional and exists only for diagnostics + * (e.g. shell commands). Schedulers that do not implement it are + * silently skipped by enumeration tools. + */ + void (*scheduler_dump_tasks)(void *data, + void (*cb)(struct task *task, void *ctx), + void *ctx); }; /** \brief Holds information about scheduler. */ diff --git a/src/ipc/ipc-common.c b/src/ipc/ipc-common.c index d0d248c9ec77..c160ac304784 100644 --- a/src/ipc/ipc-common.c +++ b/src/ipc/ipc-common.c @@ -34,8 +34,10 @@ #include #include #include +#include #include +#include LOG_MODULE_REGISTER(ipc, CONFIG_SOF_LOG_LEVEL); @@ -43,6 +45,114 @@ SOF_DEFINE_REG_UUID(ipc); DECLARE_TR_CTX(ipc_tr, SOF_UUID(ipc_uuid), LOG_LEVEL_INFO); +/* Lightweight IPC stats. Updated under ipc->lock on the write paths and + * read with the same lock from the shell. Single instance is sufficient + * since there is only one IPC instance per firmware image. 
+ */ +static struct ipc_stats g_ipc_stats; + +void ipc_stats_record_rx(uint32_t pri, uint32_t ext) +{ + struct ipc *ipc = ipc_get(); + k_spinlock_key_t key; + + if (!ipc) { + g_ipc_stats.rx_count++; + g_ipc_stats.last_rx_pri = pri; + g_ipc_stats.last_rx_ext = ext; + g_ipc_stats.last_rx_time = sof_cycle_get_64(); + return; + } + + key = k_spin_lock(&ipc->lock); + g_ipc_stats.rx_count++; + g_ipc_stats.last_rx_pri = pri; + g_ipc_stats.last_rx_ext = ext; + g_ipc_stats.last_rx_time = sof_cycle_get_64(); + k_spin_unlock(&ipc->lock, key); +} + +void ipc_stats_record_tx(uint32_t pri, uint32_t ext, bool direct, int err) +{ + struct ipc *ipc = ipc_get(); + k_spinlock_key_t key; + + if (!ipc) { + if (err < 0) + g_ipc_stats.tx_errors++; + else if (direct) + g_ipc_stats.tx_direct_count++; + else + g_ipc_stats.tx_count++; + g_ipc_stats.last_tx_pri = pri; + g_ipc_stats.last_tx_ext = ext; + g_ipc_stats.last_tx_time = sof_cycle_get_64(); + return; + } + + key = k_spin_lock(&ipc->lock); + if (err < 0) { + g_ipc_stats.tx_errors++; + } else { + if (direct) + g_ipc_stats.tx_direct_count++; + else + g_ipc_stats.tx_count++; + g_ipc_stats.last_tx_pri = pri; + g_ipc_stats.last_tx_ext = ext; + g_ipc_stats.last_tx_time = sof_cycle_get_64(); + } + k_spin_unlock(&ipc->lock, key); +} + +void ipc_stats_inc_rx_error(void) +{ + struct ipc *ipc = ipc_get(); + k_spinlock_key_t key; + + if (!ipc) { + g_ipc_stats.rx_errors++; + return; + } + + key = k_spin_lock(&ipc->lock); + g_ipc_stats.rx_errors++; + k_spin_unlock(&ipc->lock, key); +} + +void ipc_stats_get(struct ipc_stats *out) +{ + struct ipc *ipc = ipc_get(); + k_spinlock_key_t key; + + if (!out) + return; + + if (!ipc) { + *out = g_ipc_stats; + return; + } + + key = k_spin_lock(&ipc->lock); + *out = g_ipc_stats; + k_spin_unlock(&ipc->lock, key); +} + +void ipc_stats_reset(void) +{ + struct ipc *ipc = ipc_get(); + k_spinlock_key_t key; + + if (!ipc) { + memset(&g_ipc_stats, 0, sizeof(g_ipc_stats)); + return; + } + + key = k_spin_lock(&ipc->lock); 
+ memset(&g_ipc_stats, 0, sizeof(g_ipc_stats)); + k_spin_unlock(&ipc->lock, key); +} + int ipc_process_on_core(uint32_t core, bool blocking) { struct ipc *ipc = ipc_get(); diff --git a/src/ipc/ipc-zephyr.c b/src/ipc/ipc-zephyr.c index 66ada4ddaa05..68821c4623ca 100644 --- a/src/ipc/ipc-zephyr.c +++ b/src/ipc/ipc-zephyr.c @@ -229,6 +229,8 @@ enum task_state ipc_platform_do_cmd(struct ipc *ipc) hdr = ipc_compact_read_msg(); + ipc_stats_record_rx(((uint32_t *)hdr)[0], ((uint32_t *)hdr)[1]); + /* perform command */ ipc_cmd(hdr); @@ -272,13 +274,17 @@ void ipc_platform_complete_cmd(struct ipc *ipc) int ipc_platform_send_msg(const struct ipc_msg *msg) { + int ret; + if (ipc_service_get_tx_buffer_size(&sof_ipc_ept) == 0) return -EBUSY; /* prepare the message and copy to mailbox */ struct ipc_cmd_hdr *hdr = ipc_prepare_to_send(msg); - return ipc_service_send(&sof_ipc_ept, hdr, sizeof(*hdr)); + ret = ipc_service_send(&sof_ipc_ept, hdr, sizeof(*hdr)); + ipc_stats_record_tx(((uint32_t *)hdr)[0], ((uint32_t *)hdr)[1], false, ret); + return ret; } void ipc_platform_send_msg_direct(const struct ipc_msg *msg) @@ -287,6 +293,8 @@ void ipc_platform_send_msg_direct(const struct ipc_msg *msg) struct ipc_cmd_hdr *hdr = ipc_prepare_to_send(msg); int ret = ipc_service_send_critical(&sof_ipc_ept, hdr, sizeof(*hdr)); + ipc_stats_record_tx(((uint32_t *)hdr)[0], ((uint32_t *)hdr)[1], true, ret); + if (ret < 0) tr_err(&ipc_tr, "ipc_service_send_critical() failed: %d", ret); } diff --git a/src/ipc/ipc4/handler-kernel.c b/src/ipc/ipc4/handler-kernel.c index ae75ebc87303..70f361153cea 100644 --- a/src/ipc/ipc4/handler-kernel.c +++ b/src/ipc/ipc4/handler-kernel.c @@ -47,6 +47,7 @@ #endif #include +#include #include #include #include @@ -212,8 +213,31 @@ __cold static int ipc4_load_library(struct ipc4_message_request *ipc4) ret = lib_manager_load_library(library.header.r.dma_id, library.header.r.lib_id, ipc4->primary.r.type); - if (ret != 0) - return (ret == -EINVAL) ? 
IPC4_ERROR_INVALID_PARAM : IPC4_FAILURE; + if (ret != 0) { + log_panic(); /* flush all pending log messages before replying */ + /* Encode specific error to allow diagnosis from dmesg IPC status code */ + switch (ret) { + case -EINVAL: + return IPC4_ERROR_INVALID_PARAM; /* 1 - invalid param */ + case -ENOMEM: + return IPC4_OUT_OF_MEMORY; /* 3 - out of memory */ + case -EFAULT: + return 4; /* 4 - EFAULT */ + case -ENOENT: + return 9; /* 9 - resource not found (symbol) */ + case -ENOEXEC: + return IPC4_INVALID_MANIFEST; /* 14 - invalid ELF */ + case -EPROTO: + return 15; /* 15 - protocol error (EPROTO) */ + case -EBUSY: + return 42; /* 42 - EBUSY */ + case -ETIMEDOUT: + return 43; /* 43 - DMA timeout */ + default: + /* encode negative errno as 100+ value for diagnosis */ + return (-ret < 50) ? (-ret + 100) : IPC4_FAILURE; + } + } return IPC4_SUCCESS; } @@ -565,6 +589,7 @@ void ipc_cmd(struct ipc_cmd_hdr *_hdr) /* should not reach here as we only have 2 message types */ ipc_cmd_err(&ipc_tr, "ipc4: invalid target %d", target); err = IPC4_UNKNOWN_MESSAGE_TYPE; + ipc_stats_inc_rx_error(); } /* FW sends an ipc message to host if request bit is clear */ diff --git a/src/library_manager/lib_manager.c b/src/library_manager/lib_manager.c index b81ec6bbe738..99cd682e6219 100644 --- a/src/library_manager/lib_manager.c +++ b/src/library_manager/lib_manager.c @@ -16,6 +16,8 @@ #include #include +#include + #include #include #include @@ -868,11 +870,16 @@ static int lib_manager_dma_deinit(struct lib_manager_dma_ext *dma_ext, uint32_t static int lib_manager_load_data_from_host(struct lib_manager_dma_ext *dma_ext, uint32_t size) { - uint64_t timeout = k_ms_to_cyc_ceil64(200); + uint64_t timeout = k_ms_to_cyc_ceil64(600); struct dma_status stat; + uint32_t init_wp; int ret; - /* Wait till whole data acquired with timeout of 200ms */ + /* Capture initial WP for diagnostics */ + ret = dma_get_status(dma_ext->chan->dma->z_dev, dma_ext->chan->index, &stat); + init_wp = 
stat.write_position; + + /* Wait till whole data acquired with timeout of 600ms */ timeout += sof_cycle_get_64(); for (;;) { @@ -887,7 +894,10 @@ static int lib_manager_load_data_from_host(struct lib_manager_dma_ext *dma_ext, k_usleep(100); } - tr_err(&lib_manager_tr, "timeout during DMA transfer"); + tr_err(&lib_manager_tr, + "DMA timeout: plen=%u wp=%u rp=%u sz=%u init_wp=%u", + stat.pending_length, stat.write_position, stat.read_position, size, + init_wp); return -ETIMEDOUT; } @@ -988,6 +998,7 @@ static int lib_manager_store_library(struct lib_manager_dma_ext *dma_ext, ret = lib_manager_store_data(dma_ext, (uint8_t __sparse_cache *)library_base_address + MAN_MAX_SIZE_V1_8, preload_size - MAN_MAX_SIZE_V1_8); if (ret < 0) { + tr_err(&lib_manager_tr, "lib_manager_store_data(rest) failed: %d", ret); rfree((__sparse_force void *)library_base_address); return ret; } @@ -1100,6 +1111,7 @@ int lib_manager_load_library(uint32_t dma_id, uint32_t lib_id, uint32_t type) } lib_manager_init(); + LOG_ERR("CANARY lib_id=%u type=%u", lib_id, type); _ext_lib = ext_lib_get(); @@ -1120,6 +1132,7 @@ int lib_manager_load_library(uint32_t dma_id, uint32_t lib_id, uint32_t type) MAN_MAX_SIZE_V1_8, CONFIG_MM_DRV_PAGE_SIZE); if (!man_tmp_buffer) { ret = -ENOMEM; + LOG_ERR("man_tmp_buffer alloc failed"); goto cleanup; } @@ -1173,6 +1186,60 @@ int lib_manager_load_library(uint32_t dma_id, uint32_t lib_id, uint32_t type) if (!ret) tr_info(&lib_manager_tr, "loaded library id: %u", lib_id); + else + LOG_ERR("lib_manager_load_library FAILED ret=%d", ret); return ret; } + +int lib_manager_purge_library(uint32_t lib_id) +{ + struct ext_library *ext_lib = ext_lib_get(); + struct lib_manager_mod_ctx *ctx; + unsigned int i; + + if (!lib_id || lib_id >= LIB_MANAGER_MAX_LIBS) + return -EINVAL; + + ctx = ext_lib->desc[lib_id]; + if (!ctx || !ctx->base_addr) + return -ENOENT; + +#if CONFIG_LLEXT + /* Refuse if any module file is still mapped in SRAM */ + if (ctx->mod) { + for (i = 0; i < ctx->n_mod; 
i++) { + if (ctx->mod[i].mapped) { + tr_err(&lib_manager_tr, + "lib %u mod[%u] still in SRAM", + lib_id, i); + return -EBUSY; + } + /* Auxiliary libs linked via llext_manager_add_library + * have an ebl/llext but no mapped SRAM; still in use if + * n_dependent > 0. */ + if (ctx->mod[i].n_dependent) { + tr_err(&lib_manager_tr, + "lib %u mod[%u] still has %u dependents", + lib_id, i, ctx->mod[i].n_dependent); + return -EBUSY; + } + } + rfree(ctx->mod); + ctx->mod = NULL; + } +#else + (void)i; +#endif /* CONFIG_LLEXT */ + + /* Free the DRAM/IMR storage buffer */ + rfree(ctx->base_addr); + ctx->base_addr = NULL; + + /* Free the context itself and clear the global descriptor slot */ + rfree(ctx); + ext_lib->desc[lib_id] = NULL; + + tr_info(&lib_manager_tr, "purged library id: %u", lib_id); + return 0; +} diff --git a/src/library_manager/llext_manager.c b/src/library_manager/llext_manager.c index a370a5f1c7f7..cad84497ae16 100644 --- a/src/library_manager/llext_manager.c +++ b/src/library_manager/llext_manager.c @@ -394,8 +394,10 @@ static int llext_manager_link(const char *name, }; ret = llext_load(ldr, name, llext, &ldr_parm); - if (ret) + if (ret) { + tr_err(&lib_manager_tr, "llext_load failed: ret=%d", ret); return ret; + } } /* All code sections */ @@ -448,7 +450,12 @@ static int llext_manager_link(const char *name, *mod_manifest = llext_peek(ldr, hdr->sh_offset); } - return *buildinfo && *mod_manifest ? 0 : -EPROTO; + int link_ret = *buildinfo && *mod_manifest ? 
0 : -EPROTO; + + if (link_ret) + tr_err(&lib_manager_tr, "llext_manager_link: buildinfo=%p mod_manifest=%p ret=%d", + *buildinfo, *mod_manifest, link_ret); + return link_ret; } /* Count "module files" in the library, allocate and initialize memory for their descriptors */ @@ -1068,8 +1074,10 @@ int llext_manager_add_library(uint32_t module_id) int ret = llext_manager_link_single(module_id + i, desc, ctx, (const void **)&buildinfo, &mod_manifest); - if (ret < 0) + if (ret < 0) { + tr_err(&lib_manager_tr, "llext_manager_link_single failed: %d", ret); return ret; + } } } diff --git a/src/schedule/zephyr_dp_schedule.c b/src/schedule/zephyr_dp_schedule.c index fdb10c7201af..8459e2c2bf55 100644 --- a/src/schedule/zephyr_dp_schedule.c +++ b/src/schedule/zephyr_dp_schedule.c @@ -336,6 +336,23 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta return 0; } +static void scheduler_dp_dump_tasks(void *data, + void (*cb)(struct task *task, void *ctx), + void *ctx) +{ + struct scheduler_dp_data *dp_sch = data; + struct list_item *tlist; + unsigned int lock_key; + + lock_key = scheduler_dp_lock(cpu_get_id()); + list_for_item(tlist, &dp_sch->tasks) { + struct task *task = container_of(tlist, struct task, list); + + cb(task, ctx); + } + scheduler_dp_unlock(lock_key); +} + static struct scheduler_ops schedule_dp_ops = { .schedule_task = scheduler_dp_task_shedule, #if CONFIG_SOF_USERSPACE_APPLICATION @@ -344,6 +361,7 @@ static struct scheduler_ops schedule_dp_ops = { .schedule_task_cancel = scheduler_dp_task_stop, #endif .schedule_task_free = scheduler_dp_task_free, + .scheduler_dump_tasks = scheduler_dp_dump_tasks, }; int scheduler_dp_init(void) diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c index 575a82d91dda..af2f6a37a91e 100644 
--- a/src/schedule/zephyr_ll.c +++ b/src/schedule/zephyr_ll.c @@ -511,6 +511,23 @@ static void zephyr_ll_scheduler_free(void *data, uint32_t flags) sch->n_tasks); } +static void zephyr_ll_dump_tasks(void *data, + void (*cb)(struct task *task, void *ctx), + void *ctx) +{ + struct zephyr_ll *sch = data; + struct list_item *list; + uint32_t flags = 0; + + zephyr_ll_lock(sch, &flags); + list_for_item(list, &sch->tasks) { + struct task *task = container_of(list, struct task, list); + + cb(task, ctx); + } + zephyr_ll_unlock(sch, &flags); +} + static const struct scheduler_ops zephyr_ll_ops = { .schedule_task = zephyr_ll_task_schedule, .schedule_task_before = zephyr_ll_task_schedule_before, @@ -518,6 +535,7 @@ static const struct scheduler_ops zephyr_ll_ops = { .schedule_task_free = zephyr_ll_task_free, .schedule_task_cancel = zephyr_ll_task_cancel, .scheduler_free = zephyr_ll_scheduler_free, + .scheduler_dump_tasks = zephyr_ll_dump_tasks, }; #if CONFIG_SOF_USERSPACE_LL diff --git a/src/schedule/zephyr_twb_schedule.c b/src/schedule/zephyr_twb_schedule.c index aee61360d697..1baf969c123e 100644 --- a/src/schedule/zephyr_twb_schedule.c +++ b/src/schedule/zephyr_twb_schedule.c @@ -342,10 +342,28 @@ static int scheduler_twb_task_free(void *data, struct task *task) return 0; } +static void scheduler_twb_dump_tasks(void *data, + void (*cb)(struct task *task, void *ctx), + void *ctx) +{ + struct scheduler_twb_data *twb_sch = data; + struct list_item *tlist; + unsigned int key; + + key = scheduler_twb_lock(); + list_for_item(tlist, &twb_sch->tasks) { + struct task *task = container_of(tlist, struct task, list); + + cb(task, ctx); + } + scheduler_twb_unlock(key); +} + static struct scheduler_ops schedule_twb_ops = { .schedule_task = scheduler_twb_task_shedule, .schedule_task_cancel = scheduler_twb_task_cancel, .schedule_task_free = scheduler_twb_task_free, + .scheduler_dump_tasks = scheduler_twb_dump_tasks, }; int scheduler_twb_init(void) diff --git a/zephyr/Kconfig 
b/zephyr/Kconfig index f1d1896c4234..d10fc1c18fec 100644 --- a/zephyr/Kconfig +++ b/zephyr/Kconfig @@ -253,6 +253,372 @@ config SOF_ZEPHYR_NO_SOF_CLOCK Do not use SOF clk.h interface to set the DSP clock frequency. Requires implementation of platform/lib/clk.h. +menu "SOF shell commands" + depends on SHELL + +config SOF_SHELL_HEAP_USAGE + bool "Module heap usage command" + default y + depends on SHELL + help + Enables the 'sof module_heap_usage' shell command which prints + per-module heap allocation and high-water-mark for all active + IPC4 components. + +config SOF_SHELL_PIPELINE_STATUS + bool "Pipeline status command" + default y + depends on SHELL + help + Enables the 'sof pipeline_status' shell command which lists all + active IPC4 pipelines with their core, priority, period and state. + +config SOF_SHELL_MODULE_STATUS + bool "Module (component) status command" + default y + depends on SHELL + help + Enables the 'sof module_status' shell command which lists all + active IPC4 components with their pipeline, core and state. + +config SOF_SHELL_CORE_STATUS + bool "Core status command" + default y + depends on SHELL + help + Enables the 'sof core_status' shell command which reports the + enabled/active state of each DSP core. + +config SOF_SHELL_CORE_POWER + bool "Secondary core power on/off commands" + default y + depends on SHELL && MULTICORE && SMP + help + Enables the 'sof core_on ' and 'sof core_off ' shell + commands for powering secondary DSP cores on and off at runtime. + + Core 0 (primary) is not a valid target — these commands only accept + core IDs in the range [1 .. CONFIG_CORE_COUNT-1]. + + core_on calls cpu_enable_core() which starts the secondary Zephyr + CPU and runs secondary_core_init(). core_off calls + cpu_disable_core() which requests PM_STATE_SOFT_OFF on the core + and waits for it to halt before asserting the reset line. 
+ +config SOF_SHELL_SRAM_STATUS + bool "SRAM heap status command" + default y + depends on SHELL && SYS_HEAP_RUNTIME_STATS + help + Enables the 'sof sram_status' shell command which reports HPSRAM + heap allocated, free and peak-allocated bytes. + Requires CONFIG_SYS_HEAP_RUNTIME_STATS=y. + +config SOF_SHELL_CLOCK_STATUS + bool "Clock status command" + default y + depends on SHELL && !SOF_ZEPHYR_NO_SOF_CLOCK + help + Enables the 'sof clock_status' shell command which reports the + current CPU clock frequency for each DSP core. + +config SOF_SHELL_MODULE_LIST + bool "Module list command" + default y + depends on SHELL + help + Enables the 'sof module_list' shell command which prints all modules + known to the firmware. On IPC4 Intel platforms (IPC4_BASE_FW_INTEL) + the manifest is consulted and the output includes: + - module name (8-char from manifest) + - UUID + - maximum instance count + - BSS memory per instance (bytes) + - text segment size (bytes) + - affinity mask + - CPC (cycles per copy), IBS and OBS from module config + On other platforms the registered component driver list is shown + with UUID and name from the trace context. + +config SOF_SHELL_PIPELINE_OPS + bool "Pipeline construction and control commands" + default n + depends on SHELL && IPC_MAJOR_4 + help + Enables IPC4 pipeline construction and control commands accessible + from the SOF shell. These are intended for debug and bring-up use: + + sof ppl_create [prio] [pages] [core] [lp] + Create a new IPC4 pipeline. + + sof ppl_delete + Delete a pipeline and all its module instances. + + sof ppl_state + Transition a pipeline to the given state. + + sof mod_init [core] [dp] + Instantiate a module (by module_id from module_list) into a + pipeline. No init-data is sent; only modules that have a + working zero-parameter default config will init successfully. + + sof mod_delete + Delete a module instance. + + sof mod_bind [sq] [dq] + Bind (connect) the output of src module to the input of dst. 
+ + sof mod_unbind [sq] [dq] + Unbind (disconnect) two previously bound module instances. + +config SOF_SHELL_MMU_DBG + bool "MMU / TLB debug commands" + default y + depends on SHELL && (MM_DRV_INTEL_ADSP_MTL_TLB || XTENSA_MMU) + help + Enables MMU/TLB debug shell commands. + + On Intel ADSP MTL/PTL (CONFIG_MM_DRV_INTEL_ADSP_MTL_TLB=y): + sof mmu_status — VM layout, mapped/free pages, region list + sof tlb_dump — dump all active 16-bit MMIO TLB entries + sof tlb_lookup — query a page or page range (VA/PA/R/W/X/bank) + + On Xtensa MMU platforms (CONFIG_XTENSA_MMU=y, e.g. PTL): + sof rasid — decode the RASID hardware register (ring→ASID map) + sof page_info — probe DTLB for a page or range; report + physical address, ring, ASID, cache mode, R/W/X + (PTL has both sets of commands). + +config SOF_SHELL_LLEXT_LOAD + bool "Interactive llext module load command" + default y + depends on SHELL && LIBRARY_MANAGER + help + Enables the 'sof llext_load' shell command which lets a developer + load a compiled llext module from the host file system into the DSP + at run time without a full firmware restart. + + Usage (2-step interactive flow): + + Step 1 — on the DSP shell: + uart:~$ sof llext_load mymodule 1 + + The DSP allocates an ADSP debug window slot, sets the handshake + state to REQUESTING, and waits (up to 120 s) for the host. + + Step 2 — on the Linux host: + $ cat mymodule.ri > /sys/kernel/debug/sof/llext_load + + The kernel driver reads the slot, performs the HDA DMA transfer + (IPC4 LOAD_LIBRARY_PREPARE + LOAD_LIBRARY sequence), and writes + the result back to the slot. The DSP shell command then wakes up + and prints the outcome. + + Requires CONFIG_SND_SOC_SOF_CLIENT_LLEXT_LOAD in the Linux kernel. 
+ +config SOF_SHELL_LLEXT_LOAD_SLOT_NUM + int "Debug window slot for llext_load (fallback without slot manager)" + default 2 + depends on SOF_SHELL_LLEXT_LOAD && !INTEL_ADSP_DEBUG_SLOT_MANAGER + help + Index of the ADSP debug window slot (0-14) reserved for the + shell llext_load handshake when CONFIG_INTEL_ADSP_DEBUG_SLOT_MANAGER + is not available. Must not conflict with TRACE/TELEMETRY/SHELL slots. + Default slot 2 is normally free on this configuration. + +config SOF_SHELL_LLEXT_LIST + bool "llext library listing command" + default y + depends on SHELL && LIBRARY_MANAGER + help + Enables the 'sof llext_list' shell command. Iterates over all + loaded llext libraries in IMR/DRAM and prints, for each one: + base address, storage size, and per-module-file SRAM mapping state + (mapped/unmapped), Zephyr llext use-count, and dependency count. + + Useful for verifying that an llext load succeeded and for checking + whether any module is still active before attempting a purge. + +config SOF_SHELL_LLEXT_PURGE + bool "llext library purge command" + default y + depends on SHELL && LIBRARY_MANAGER + help + Enables the 'sof llext_purge ' shell command. Removes the + specified llext library from IMR/DRAM storage and frees all memory + associated with it. + + The command refuses if any module within the library is still mapped + into SRAM (i.e. an active pipeline is using it) and returns -EBUSY. + Tear down all pipelines that reference the library before purging. + +config SOF_SHELL_BUFFER_INFO + bool "Audio buffer list/info commands" + default y + depends on SHELL + help + Enables the 'sof buffer_list' and 'sof buffer_info ' shell + commands. buffer_list walks all components and prints every + downstream comp_buffer with its source/sink component IDs, current + fill level (size/avail/free), channel count, sample rate and + frame format. buffer_info dumps the same fields plus rptr/wptr, + flags and core for a single buffer selected by ID. 
+ + Useful for diagnosing where audio is stuck in a pipeline (xrun, + underrun, mis-bind) without rebuilding firmware. + +config SOF_SHELL_SCHED_INFO + bool "Scheduler task list/load commands" + default y + depends on SHELL + help + Enables the 'sof sched_tasks' and 'sof sched_load' shell commands. + + sched_tasks walks every registered SOF scheduler (LL timer, LL DMA, + EDF, DP, TWB) and prints each task's scheduler type, core, priority, + state, flags and uid. + + sched_load prints per-task execution cycle counters (cycles_cnt, + cycles_sum, cycles_max, derived average) plus aggregate totals, + using the cycle counters that the schedulers already maintain in + struct task. Pairs naturally with 'sof test_inject_sched_gap' for + diagnosing scheduling latency. + + Each scheduler implements scheduler_dump_tasks() which takes its + own lock during the walk; schedulers without an implementation are + silently skipped. + +config SOF_SHELL_LOG_INFO + bool "Log subsystem status command" + default y + depends on SHELL && LOG + help + Enables the 'sof log_status' shell command, which lists every + registered Zephyr log backend with its index, internal id, + active/inactive state and name, plus the total number of + registered log sources in the local domain. + + Read-only and cheap; useful for confirming which backends + (uart_console, adsp_mtrace, adsp_hda, ...) are alive without + having to enable CONFIG_LOG_RUNTIME_FILTERING and the much + larger Zephyr 'log' shell module. + +config SOF_SHELL_MTRACE_DUMP + bool "mtrace SRAM buffer snapshot command" + default y + depends on SHELL && LOG_BACKEND_ADSP_MTRACE + help + Enables the 'sof mtrace_dump' shell command which prints the + unread portion of the ADSP mtrace SRAM ring buffer (the same + buffer normally consumed by host-side mtrace-reader.py). + + The snapshot does NOT advance the host_ptr, so it is safe to + use while a host consumer is running: the host will still + receive every byte. 
Useful when no host driver is attached + (e.g. early bring-up over UART shell only). + +config SOF_SHELL_MAILBOX_HEX + bool "Hex-dump SOF mailbox regions" + default y + depends on SHELL + help + Enables 'sof mailbox_hex [offset] [length]' which hex + dumps one of the SOF mailbox regions (exception, dspbox, hostbox, + debug). With no arguments lists the regions and their sizes. The + length is clamped to the region size; default 256 bytes. + + Useful for inspecting the panic dump area after a fatal error and + for low-level IPC payload debugging. + +config SOF_SHELL_DBGWIN_DUMP + bool "Hex-dump ADSP debug-window slots" + default y + depends on SHELL && SOC_FAMILY_INTEL_ADSP + help + Enables 'sof dbgwin_dump [slot] [length]' which lists the + descriptors of all ADSP debug-window slots (slot manager + metadata in window 2 page 0) with a friendly type name, and + hex-dumps the slot contents when a slot index is given. + + This makes it possible to peek at gdb_stub, telemetry, + critical_log, broken-marker and other slots from the shell + without host-side tooling. + +config SOF_SHELL_PERF_STATUS + bool "Performance / telemetry status command" + default y + depends on SHELL && SOF_TELEMETRY + help + Enables 'sof perf_status [reset|start|stop|pause]' which prints + the current ipc4_perf_measurements_state plus per-active-core + systick counters from the SOF telemetry slot: + count, last/max time elapsed (cycles), avg/peak KCPS and the + 4k/8k peak utilization buckets. + + With one of the optional sub-commands the command transitions + the perf measurement state machine ('start' calls + enable_performance_counters(), 'reset' calls + reset_performance_counters(); 'stop'/'pause' just update the + state) - useful for taking before/after snapshots without IPC. 
+ +config SOF_SHELL_DAI_LIST + bool "DAI introspection command" + default y + depends on SHELL && ZEPHYR_NATIVE_DRIVERS + help + Enables 'sof dai_list' which iterates dai_get_device_list() + and prints, per DAI, the Zephyr device name, decoded type + (ssp/dmic/hda/alh/uaol/sai/esai/...), index, current channel + count, sample rate, format and word size from + dai_config_get(), plus per-direction fifo address, fifo depth, + DMA handshake id and stream id from dai_get_properties(). + + Useful for verifying which DAIs were registered for a topology + without going through host-side debug. + +config SOF_SHELL_DMA_STATUS + bool "DMA controller / channel status command" + default y + depends on SHELL && ZEPHYR_NATIVE_DRIVERS + help + Enables 'sof dma_status [dma_idx [chan]]': + + 'sof dma_status' - list every DMA controller + registered with SOF (id, + channel count, busy count, + caps/devs bitmasks, base, + Zephyr device name). + 'sof dma_status ' - per-channel status for one + controller. + 'sof dma_status ' - status for one channel. + + Per-channel status comes from sof_dma_get_status() (Zephyr + dma_get_status()) and shows busy/idle, direction, pending and + free bytes, read/write positions and total_copied. Pairs well + with 'sof dai_list' for diagnosing P2M/M2P paths. + +config SOF_SHELL_KCTL_LIST + bool "kcontrol introspection command" + default y + depends on SHELL + help + Enables 'sof kctl_list' which walks the IPC component list and + prints, per component, the comp_id, pipeline_id, core, decoded + module name (volume / gain / mixin / mixout / eqiir / src / + ...) taken from the trace UUID context, a coarse 'kind' tag + (volume / mixer / blob / config) for control-bearing modules, + and the current comp_state. + + This is read-only on purpose. 
Actual kcontrol get/set values + flow through per-module IPC4 large_config blobs + (set_configuration / get_configuration) which need + module-specific marshalling and are intentionally left to host + tools (tinymix, sof-ctl). Use this command together with 'sof + module_status' to identify which comp_id carries which control + before going through the host path. + +endmenu # SOF shell commands + config SOF_VREGIONS bool "Enable virtual memory regions" default y if ACE && !ACE_VERSION_1_5 && !ACE_VERSION_2_0 diff --git a/zephyr/include/sof/lib/vpage.h b/zephyr/include/sof/lib/vpage.h index f3fc8b89e968..0b2fed365269 100644 --- a/zephyr/include/sof/lib/vpage.h +++ b/zephyr/include/sof/lib/vpage.h @@ -31,6 +31,15 @@ void *vpage_alloc(unsigned int pages); */ void vpage_free(void *ptr); +struct shell; + +/** + * @brief Dump virtual page allocator status + * + * @param[in] sh Shell context to print to. + */ +void vpage_info(const struct shell *sh); + #ifdef __cplusplus } #endif diff --git a/zephyr/include/sof/shell_llext_load.h b/zephyr/include/sof/shell_llext_load.h new file mode 100644 index 000000000000..9ffb80ac9eb6 --- /dev/null +++ b/zephyr/include/sof/shell_llext_load.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2024 Intel Corporation. + * + * Author: Kai Vehmanen + */ + +/** + * @file shell_llext_load.h + * @brief Shared mailbox structure for the DSP shell "llext_load" command. + * + * The DSP shell command and the Linux "sof-client-llext-load" driver + * communicate through a single ADSP debug window slot identified by the + * ADSP_DW_SLOT_LLEXT_LOAD type. Both sides treat the first 96 bytes of the + * slot as a struct sof_shell_llext_slot. + * + * Handshake protocol + * ------------------ + * 1. DSP shell writes: magic, lib_id, name → state = REQUESTING + * 2. Host driver checks state == REQUESTING, reads lib_id + * 3. Host sets state = DMA_ACTIVE, starts HDA DMA + IPC4 load + * 4a. 
On success: host writes xfer_bytes, then state = DMA_DONE + * 4b. On failure: host writes result (errno), then state = ERROR + * 5. DSP shell detects state change, reports result, sets state = IDLE + * + * The binary layout must match the Linux copy in shell-llext-shm.h. + */ + +#ifndef __SOF_SHELL_LLEXT_LOAD_H__ +#define __SOF_SHELL_LLEXT_LOAD_H__ + +#include +#include /* ADSP_DW_SLOT_LLEXT_LOAD */ + +/** + * Magic placed in ->magic to indicate the DSP has initialized the slot. + * Reuses the slot-type value so a single constant identifies both. + */ +#define SOF_SHELL_LLEXT_MAGIC ADSP_DW_SLOT_LLEXT_LOAD + +/** + * Handshake states stored in struct sof_shell_llext_slot::state. + * The DSP writes REQUESTING; the host writes DMA_ACTIVE, DMA_DONE or ERROR. + */ +enum sof_shell_llext_state { + SOF_SHELL_LLEXT_IDLE = 0, /* no request pending */ + SOF_SHELL_LLEXT_REQUESTING = 1, /* DSP ready, waiting for the host to DMA */ + SOF_SHELL_LLEXT_DMA_ACTIVE = 2, /* host: copy / DMA in progress */ + SOF_SHELL_LLEXT_DMA_DONE = 3, /* host: library DMA + IPC load complete */ + SOF_SHELL_LLEXT_ERROR = 4, /* host: load failed, ->result holds errno */ +}; + +/** + * struct sof_shell_llext_slot — placed at offset 0 of the debug window slot. + * + * Total size: 96 bytes (the slot is 4 KB; the remainder is unused). + */ +struct sof_shell_llext_slot { + uint32_t magic; /**< SOF_SHELL_LLEXT_MAGIC when valid */ + uint32_t state; /**< enum sof_shell_llext_state */ + uint32_t lib_id; /**< library slot [1 .. 
LIB_MANAGER_MAX_LIBS - 1] */ + uint32_t xfer_bytes; /**< bytes transferred (written by host on success) */ + int32_t result; /**< 0 on success, negative errno on error */ + uint32_t reserved[3]; /**< pad, must be zero */ + char name[64]; /**< filename hint, NUL-terminated, display only */ +} __packed; + +#endif /* __SOF_SHELL_LLEXT_LOAD_H__ */ diff --git a/zephyr/lib/vpage.c b/zephyr/lib/vpage.c index ce0da7b5ac97..b722aafa8edb 100644 --- a/zephyr/lib/vpage.c +++ b/zephyr/lib/vpage.c @@ -12,6 +12,7 @@ #include #include #include +#include LOG_MODULE_REGISTER(vpage, CONFIG_SOF_LOG_LEVEL); @@ -251,6 +252,26 @@ void vpage_free(void *ptr) vpage_ctx.total_pages); } +void vpage_info(const struct shell *sh) +{ + k_mutex_lock(&vpage_ctx.lock, K_FOREVER); + + shell_fprintf(sh, SHELL_NORMAL, "Virtual Page Allocator Status:\n"); + shell_fprintf(sh, SHELL_NORMAL, " Region Base: %p, Size: %#zx bytes, Total Pages: %u\n", + (void *)vpage_ctx.virtual_region->addr, + vpage_ctx.virtual_region->size, vpage_ctx.total_pages); + shell_fprintf(sh, SHELL_NORMAL, " Free Pages: %u\n", vpage_ctx.free_pages); + shell_fprintf(sh, SHELL_NORMAL, " Allocated Elements in use: %u / %d\n", + vpage_ctx.num_elems_in_use, VPAGE_MAX_ALLOCS); + + for (unsigned int i = 0; i < vpage_ctx.num_elems_in_use; i++) { + shell_fprintf(sh, SHELL_NORMAL, " [%u] vpage %u, pages %u\n", + i, vpage_ctx.velems[i].vpage, vpage_ctx.velems[i].pages); + } + + k_mutex_unlock(&vpage_ctx.lock); +} + /** * @brief Initialize virtual page allocator * diff --git a/zephyr/lib/vregion.c b/zephyr/lib/vregion.c index 84af0d0645e6..6281e24dedc3 100644 --- a/zephyr/lib/vregion.c +++ b/zephyr/lib/vregion.c @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include #include #include @@ -15,6 +17,9 @@ LOG_MODULE_REGISTER(vregion, CONFIG_SOF_LOG_LEVEL); +static sys_dlist_t vregion_list = SYS_DLIST_STATIC_INIT(&vregion_list); +static K_MUTEX_DEFINE(vregion_list_lock); + /* * Pre Allocated Contiguous Virtual Memory Region Allocator 
* @@ -80,6 +85,8 @@ struct interim_heap { * TODO: Add support to flag which heaps should have their contexts saved and restored. */ struct vregion { + sys_dnode_t node; + /* region context */ uint8_t *base; /* base address of entire region */ size_t size; /* size of whole region in bytes */ @@ -164,6 +171,10 @@ struct vregion *vregion_create(size_t lifetime_size, size_t interim_size) LOG_DBG(" interim size %#zx at %p", interim_size, (void *)vr->interim.heap.heap.init_mem); LOG_DBG(" lifetime size %#zx at %p", lifetime_size, (void *)vr->lifetime.base); + k_mutex_lock(&vregion_list_lock, K_FOREVER); + sys_dlist_append(&vregion_list, &vr->node); + k_mutex_unlock(&vregion_list_lock); + return vr; } @@ -204,6 +215,11 @@ struct vregion *vregion_put(struct vregion *vr) /* log the vregion being destroyed */ LOG_DBG("destroy %p size %#zx pages %u", (void *)vr->base, vr->size, vr->pages); LOG_DBG(" lifetime used %zu free count %d", vr->lifetime.used, vr->lifetime.free_count); + + k_mutex_lock(&vregion_list_lock, K_FOREVER); + sys_dlist_remove(&vr->node); + k_mutex_unlock(&vregion_list_lock); + vpage_free(vr->base); return NULL; @@ -438,3 +454,29 @@ void vregion_mem_info(struct vregion *vr, size_t *size, uintptr_t *start) if (start) *start = (uintptr_t)vr->base; } + +void vregion_info_all(const struct shell *sh) +{ + struct vregion *vr; + int count = 0; + + k_mutex_lock(&vregion_list_lock, K_FOREVER); + + shell_fprintf(sh, SHELL_NORMAL, "Virtual Regions Status:\n"); + + SYS_DLIST_FOR_EACH_CONTAINER(&vregion_list, vr, node) { + k_mutex_lock(&vr->lock, K_FOREVER); + shell_fprintf(sh, SHELL_NORMAL, " [%d] Base: %p, Size: %#zx bytes, Pages: %u\n", + count++, (void *)vr->base, vr->size, vr->pages); + shell_fprintf(sh, SHELL_NORMAL, " Lifetime Used: %#zx bytes, Free Count: %d\n", + vr->lifetime.used, vr->lifetime.free_count); + shell_fprintf(sh, SHELL_NORMAL, " Use Count: %u\n", vr->use_count); + k_mutex_unlock(&vr->lock); + } + + if (count == 0) { + shell_fprintf(sh, SHELL_NORMAL, 
" No active virtual regions found.\n"); + } + + k_mutex_unlock(&vregion_list_lock); +} diff --git a/zephyr/shell.md b/zephyr/shell.md new file mode 100644 index 000000000000..9f706f87a5f5 --- /dev/null +++ b/zephyr/shell.md @@ -0,0 +1,431 @@ +# SOF Zephyr Shell Commands + +This document describes all SOF-specific shell commands available in the Zephyr RTOS environment. These commands are grouped under the `sof` parent command and provide diagnostic visibility into the firmware's runtime state. + +## Usage + +Access the Zephyr shell through the QEMU terminal or a hardware UART console. Prefix all commands with `sof`. + +```shell +uart:~$ sof [arguments] +``` + +## Available SOF Commands + +### 1. `sof version` +- **Description**: Prints the firmware version details. +- **Usage**: `sof version` +- **Output**: Major/Minor/Micro version, Git tag, and source hash. + +### 2. `sof module_heap_usage` +- **Description**: Dumps heap memory usage for all active audio modules. +- **Usage**: `sof module_heap_usage` +- **Output**: Component ID, current heap usage, and high-water mark (HWM) in bytes. + +### 3. `sof pipeline_list` +- **Description**: Lists all active audio pipelines in the system. +- **Usage**: `sof pipeline_list` +- **Output**: Pipeline ID, Core affinity, Status, Priority, and Period (us). + +### 4. `sof module_list` +- **Description**: Lists all instantiated audio modules (components). +- **Usage**: `sof module_list` +- **Output**: Component ID, Module Type, State, Pipeline ID, and Core affinity. + +### 5. `sof vpage_status` +- **Description**: Reports the status of the virtual page allocator. +- **Usage**: `sof vpage_status` +- **Output**: Base address, total/free pages, and a list of active virtual page allocations. +- **Dependency**: Requires `CONFIG_SOF_VREGIONS=y`. + +### 6. `sof vregion_status` +- **Description**: Reports status and metrics for all active virtual memory regions. 
+- **Usage**: `sof vregion_status` +- **Output**: Region base addresses, sizes, lifetime bytes used/free, and current reference counts. +- **Dependency**: Requires `CONFIG_SOF_VREGIONS=y`. + +### 7. `sof test_inject_sched_gap` +- **Description**: Injects a timing delay into the audio scheduling domain for stress testing. +- **Usage**: `sof test_inject_sched_gap [usec]` +- **Arguments**: `usec` (optional, default 1500) - microseconds to block the domain. +- **Warning**: Not reliable on SMP systems without explicit cross-core support. + +--- + +## Enabling Shell Commands + +Ensure the following Kconfig symbols are enabled in your build configuration: +- `CONFIG_SHELL=y` +- `CONFIG_SOF_VREGIONS=y` (for `vpage` and `vregion` commands) + +--- + +## Functional Areas Missing Shell Coverage + +Tracking list of subsystems that lack runtime shell visibility for control, +debug or testing. Items get ticked off as commands land on `topic/shell`. + +### Control + +| Area | Suggested commands | Status | +|---|---|---| +| **kcontrols / mixer** | `kctl_list`, `kctl_get `, `kctl_set ` | **DONE (task 8)** — `kctl_list` decodes module names + kind for every component. `kctl_get/set` deferred (per-module IPC4 large_config blobs — use host tools). | +| Module runtime config | `mod_config_get/set ` | TODO | +| Stream / copier | `stream_list`, `stream_pause/resume `, `copier_gain_set` | TODO | +| Clocks (extend `clock_status`) | `clock_set `, `clock_force ` | TODO | +| Power management | `pm_state`, `pm_force `, `pg_status`, `idle_stats` | TODO | +| Cache | `dcache_flush `, `dcache_inv`, `icache_inv` | TODO | +| Watchdog | `wdt_status`, `wdt_kick`, `wdt_disable` | TODO | +| **DAI / link control** | `dai_list`, `dai_status `, `dai_trigger`, `dai_loopback` | **DONE (task 7)** — `dai_list` covers introspection; trigger/loopback deferred (writeable, needs careful tplg coordination). 
| +| **DMA** | `dma_list`, `dma_chan_status `, `dma_stop` | **DONE (task 7)** — `dma_status` covers list+per-channel. `dma_stop` deferred (would corrupt active stream). | + +### Debug + +| Area | Suggested commands | Status | +|---|---|---| +| **IPC counters / last message** | `ipc_stats`, `ipc_last` | **DONE (task 1)** | +| IPC inject | `ipc_inject `, `ipc_queue` | TODO | +| Audio buffers | `buffer_list`, `buffer_info ` | **DONE (task 2)** | +| **Scheduler** | `sched_tasks`, `sched_load`, `task_info ` | **DONE (task 3)** | +| Logging / trace | `log_status`, `mtrace_dump` (snapshot) | **DONE (task 4)** — runtime per-source `log_level` deferred (needs `CONFIG_LOG_RUNTIME_FILTERING`, see notes) | +| **Telemetry / perf** | `perf_status`, `perf_status reset`, `perf_status start/stop/pause` | **DONE (task 6)** | +| Notifications | `notify_subscribers`, `notify_stats` | TODO | +| **Debug window / mailbox** | `dbgwin_dump `, `mailbox_hex` | **DONE (task 5)** | +| Crash / panic | `crash_log`, `crash_clear`, `panic_info`, `bt`, `regs` | TODO | +| Heap walk | `heap_walk `, `heap_blocks`, `obj_pool_stats` | TODO | +| Probes | `probe_init`, `probe_add `, `probe_remove`, `probe_dma_status` | TODO | +| Locks / IRQ / IDC | `mutex_stats`, `irq_stats`, `idc_stats` | TODO | + +### Testing / fault injection + +| Area | Suggested commands | Status | +|---|---|---| +| Fault injection | `test_alloc_fail `, `test_ipc_drop`, `test_dma_stall`, `test_xrun `, `test_panic` | TODO | +| Self-test | `selftest dma`, `selftest mem`, `selftest cache`, `selftest mmu`, `selftest llext` | TODO | +| Module unit-tests | `test_module ` | TODO | +| Loopback / signal gen | `tone_play `, `loopback_start `, `pcm_capture_dump` | TODO | +| Mock IPC4 payloads | `ipc_replay ` (via DMA slot) | TODO | +| Coverage / hooks | `cov_dump`, `assert_count`, `assert_clear` | TODO | + +### Quick-win order + +1. **`ipc_stats` / `ipc_last`** — DONE. +2. **`buffer_list` / `buffer_info`** — DONE. +3. 
**`sched_tasks` / `sched_load`** — DONE. +4. **`log_status` / `mtrace_dump`** — DONE. +5. **`mailbox_hex` / `dbgwin_dump`** — DONE (was originally `crash_log`/`bt`; pivoted because SOF panic.c isn't built on Zephyr and `bt` of a running CPU from itself isn't meaningful). +6. **`perf_status`** — DONE. +7. **`dai_list` / `dma_status`** — DONE. +8. **`kctl_get/set`** — DONE (`kctl_list` only; values stay on host). + +--- + +## Task 1 — `ipc_stats` / `ipc_last` + +### Commands + +| Command | Description | +|---|---| +| `sof ipc_stats` | Print RX/TX counters (`rx_count`, `rx_errors`, `tx_count`, `tx_direct_count`, `tx_errors`). | +| `sof ipc_stats reset` | Clear all counters. | +| `sof ipc_last` | Print the most recent RX and TX `pri`/`ext` headers and their platform-cycle timestamps. | + +### Implementation + +- Public API in `src/include/sof/ipc/common.h`: + - `struct ipc_stats` + - `ipc_stats_record_rx(pri, ext)` + - `ipc_stats_record_tx(pri, ext, direct, err)` + - `ipc_stats_inc_rx_error()` + - `ipc_stats_get(out)` / `ipc_stats_reset()` +- Storage and locking in `src/ipc/ipc-common.c` (single global, protected by + `ipc->lock`, IPC-version agnostic). +- RX hook: `ipc_platform_do_cmd()` in `src/ipc/ipc-zephyr.c`, just before + dispatch — captures the IPC3/IPC4 `pri`/`ext` words. +- TX hooks: `ipc_platform_send_msg()` and `ipc_platform_send_msg_direct()` in + the same file, after the platform send returns. +- Error hook: `ipc_cmd()` default branch in `src/ipc/ipc4/handler-kernel.c` + for unknown message targets. +- Shell commands in [zephyr/sof_shell.c](zephyr/sof_shell.c). + +### Notes / follow-ups + +- Counters are 32-bit; they wrap at ~4 billion messages. +- IPC3 dispatch errors are not yet routed through `ipc_stats_inc_rx_error()`. +- A future task can add a small ring buffer of last-N IPC headers and + per-target counters (FW_GEN_MSG vs MODULE_MSG, plus per opcode). 
+ +## Task 2 — `buffer_list` / `buffer_info` + +### Commands + +| Command | Description | +|---|---| +| `sof buffer_list` | List every audio buffer in the pipeline with source/sink component IDs, size, avail, free, channels, rate and frame format. | +| `sof buffer_info ` | Detailed info for a single buffer: source/sink comps, core, flags, size/avail/free bytes, rptr, wptr, channels, rate, frame format. | + +### Implementation + +- Buffers are enumerated by walking `ipc->comp_list`; for each + `COMP_TYPE_COMPONENT` we walk its `bsink_list` via + `comp_dev_get_first_data_consumer()` / + `comp_dev_get_next_data_consumer()`. Each buffer therefore appears + exactly once (it is the sink of exactly one source component) and the + same enumeration works on both IPC3 and IPC4. +- `buf_get_id()` from [src/include/sof/audio/buffer.h](src/include/sof/audio/buffer.h) + is used as the buffer identifier. +- Stream metrics use the existing `audio_stream_get_*()` accessors from + [src/include/sof/audio/audio_stream.h](src/include/sof/audio/audio_stream.h). +- New Kconfig `CONFIG_SOF_SHELL_BUFFER_INFO` (default `y`). +- Shell commands in [zephyr/sof_shell.c](zephyr/sof_shell.c). + +### Notes / follow-ups + +- Today only fill-level snapshots are reported; high-water mark and + underrun/overrun counters are not tracked in `comp_buffer` and would + require new instrumentation. +- `buffer_info` does not yet decode `flags` symbolically. +- A future enhancement could add `--core ` filtering and per-buffer + topology graph output. + +## Task 3 — `sched_tasks` / `sched_load` + +### Commands + +| Command | Description | +|---|---| +| `sof sched_tasks` | List every task across all SOF schedulers (LL timer, LL DMA, EDF, DP, TWB) with type, core, priority, state, flags and uid. | +| `sof sched_load` | Per-task cycle counters (cycles_cnt, cycles_sum, cycles_max, derived average) plus aggregate totals. Pairs with `test_inject_sched_gap`. 
| + +### Implementation + +- New optional op `scheduler_dump_tasks(data, cb, ctx)` added to + `struct scheduler_ops` in + [src/include/sof/schedule/schedule.h](src/include/sof/schedule/schedule.h). +- Implemented for the Zephyr schedulers under their own locks: + - [src/schedule/zephyr_ll.c](src/schedule/zephyr_ll.c) (LL timer / LL DMA) + - [src/schedule/zephyr_twb_schedule.c](src/schedule/zephyr_twb_schedule.c) + - [src/schedule/zephyr_dp_schedule.c](src/schedule/zephyr_dp_schedule.c) +- Shell walks the global scheduler list via `arch_schedulers_get()` and + invokes the op on every scheduler that provides one; schedulers + without an implementation are silently skipped. +- Cycle counters are read from existing `task->cycles_sum`, + `task->cycles_max`, `task->cycles_cnt` fields already maintained by + the schedulers. +- New Kconfig `CONFIG_SOF_SHELL_SCHED_INFO` (default `y`). +- Shell commands in [zephyr/sof_shell.c](zephyr/sof_shell.c). + +### Notes / follow-ups + +- The xtos LL scheduler is not yet covered (not built on Zephyr ACE + targets). +- `task_info ` lookup, deadline-miss counts and per-core + aggregation could be added on top of the same op. + +## Task 4 — `log_status` / `mtrace_dump` + +### Commands + +| Command | Description | +|---|---| +| `sof log_status` | List every Zephyr log backend (idx, internal id, active state, name) plus the total number of registered log sources in the local domain. Read-only. | +| `sof mtrace_dump` | Print the unread portion of the ADSP mtrace SRAM ring buffer as a snapshot, *without* advancing `host_ptr`. Safe to use while host-side `mtrace-reader.py` is running. | + +### Implementation + +- `log_status` uses Zephyr's public log backend API + (`log_backend_count_get()`, `log_backend_get()`, + `log_backend_is_active()`, `log_backend_id_get()`, + `log_src_cnt_get()`); no new state is added. 
+- `mtrace_dump` re-acquires the existing mtrace slot: + - With `CONFIG_INTEL_ADSP_DEBUG_SLOT_MANAGER=y` (default on PTL/MTL/LNL): + via `adsp_dw_request_slot()` with the same descriptor type + (`ADSP_DW_SLOT_DEBUG_LOG | core 0`); the slot manager returns the + already-allocated slot. + - Otherwise: directly indexes + `ADSP_DW->slots[ADSP_DW_SLOT_NUM_MTRACE]`. + - The slot layout (`{host_ptr, dsp_ptr, data[]}`) mirrors the one in + [zephyr/subsys/logging/backends/log_backend_adsp_mtrace.c](../../zephyr/subsys/logging/backends/log_backend_adsp_mtrace.c). + - We read from `host_ptr` to `dsp_ptr` byte-by-byte and write to the + shell, but never store back to `host_ptr`, so the host-side + consumer keeps seeing the same bytes. +- Two new Kconfigs (default `y`): + - `CONFIG_SOF_SHELL_LOG_INFO` (depends on `LOG`) + - `CONFIG_SOF_SHELL_MTRACE_DUMP` (depends on `LOG_BACKEND_ADSP_MTRACE`) +- Shell commands in [zephyr/sof_shell.c](zephyr/sof_shell.c). + +### Notes / follow-ups + +- Per-source runtime `log_level` setting was deliberately deferred: it + requires `CONFIG_LOG_RUNTIME_FILTERING=y` (extra per-call overhead) + and Zephyr already ships an equivalent `log` shell command + (`CONFIG_LOG_CMDS=y`). If we ever need it we should reuse Zephyr's + command rather than reimplement it. +- `mtrace_dump` shows raw text exactly as the backend formatted it; a + future option could format output in pages or filter by severity. +- A `mtrace_dump --consume` mode (advance `host_ptr`) is intentionally + not provided to avoid silently breaking host-side tooling. + +## Task 5 — `mailbox_hex` / `dbgwin_dump` + +### Commands + +| Command | Description | +|---|---| +| `sof mailbox_hex` | List the four SOF mailbox regions (exception, dspbox, hostbox, debug) with their base address and size. | +| `sof mailbox_hex [off] [len]` | Hex-dump a mailbox region; offset and length are clamped to the region size. Default length 256 bytes. 
| +| `sof dbgwin_dump` | List all 15 ADSP debug-window slot descriptors (resource_id, type, vma, decoded type name, core). | +| `sof dbgwin_dump [len]` | Hex-dump a single slot (max `ADSP_DW_SLOT_SIZE` = 4096 bytes); default length 256. | + +### Implementation + +- `mailbox_hex` uses the `MAILBOX_*_BASE` / `MAILBOX_*_SIZE` macros + from [src/include/sof/lib/mailbox.h](src/include/sof/lib/mailbox.h); + the four region records are a static table. +- `dbgwin_dump` re-derives the window 2 base from the device tree + (`mem_window2`) plus `WIN2_OFFSET`, mirrors + `struct adsp_debug_window` from + [zephyr/soc/intel/intel_adsp/common/debug_window.c](../../zephyr/soc/intel/intel_adsp/common/debug_window.c) + and reads through an uncached pointer so we always see the + slot-manager state (works whether or not + `CONFIG_INTEL_ADSP_DEBUG_SLOT_MANAGER=y`). +- A small shared `sof_shell_hex_dump()` helper handles the 16-byte + hex+ASCII rows and is built whenever either command is enabled. +- Two new Kconfigs (default `y`): + - `CONFIG_SOF_SHELL_MAILBOX_HEX` + - `CONFIG_SOF_SHELL_DBGWIN_DUMP` (depends on `SOC_FAMILY_INTEL_ADSP`) +- Shell commands in [zephyr/sof_shell.c](zephyr/sof_shell.c). + +### Notes / follow-ups + +- The original quick-win was `crash_log`/`bt`; pivoted because SOF's + in-tree `panic_dump()` is not compiled on Zephyr (Zephyr installs + its own fatal handler) and a running shell can't backtrace its own + CPU after a panic. `mailbox_hex exception` still surfaces whatever + the platform-specific fatal path leaves there, so the same + diagnostic intent is covered as far as it can be from a live shell. +- A future `panic_decode` could parse a known on-target oops layout + (Zephyr coredump or telemetry slot) once one is standardised on + ACE. +- `dbgwin_dump` is read-only. We do not implement a write/seize + command to avoid corrupting host-visible state. 
+ +## Task 6 — `perf_status` + +### Commands + +| Command | Description | +|---|---| +| `sof perf_status` | Print the SOF telemetry performance state (`disabled`/`stopped`/`started`/`paused`) and per-active-core systick counters (`count`, `last_time_elapsed`, `max_time_elapsed`, `avg_kcps`, `peak_kcps`, plus 4k/8k peak utilization). | +| `sof perf_status reset` | Call `reset_performance_counters()` to zero all counters. | +| `sof perf_status start` | Call `enable_performance_counters()` and set state to `STARTED`. | +| `sof perf_status stop` / `pause` | Transition state to `STOPPED` or `PAUSED` (stops sampling without zeroing counters). | + +### Implementation + +- Reads per-core systick info via + `telemetry_get_systick_info_ptr()` (with + `CONFIG_INTEL_ADSP_DEBUG_SLOT_MANAGER`) or directly from + `ADSP_DW->slots[SOF_DW_TELEMETRY_SLOT]` otherwise. +- Iterates only cores in `cpu_enabled_cores()` so the output matches + the active topology. +- Uses the existing `perf_meas_get_state()` / + `perf_meas_set_state()` / + `enable_performance_counters()` / + `reset_performance_counters()` API from + [src/include/sof/debug/telemetry/performance_monitor.h](src/include/sof/debug/telemetry/performance_monitor.h); + no new state added. +- New Kconfig `CONFIG_SOF_SHELL_PERF_STATUS` (default `y`, + depends on `SOF_TELEMETRY`). +- Shell command in [zephyr/sof_shell.c](zephyr/sof_shell.c). + +### Notes / follow-ups + +- We deliberately do not dump the full per-component + `perf_data_item_comp` array yet: it can grow large + (`CONFIG_MEMORY_WIN_3_SIZE` / item size, ~hundreds of items on PTL) + and would require a heap allocation. A future `perf_components` / + `perf_status -v` could iterate `performance_data_bitmap` and stream + one row per occupied slot. +- Zephyr already provides `kernel cpu_load` and `kernel threads`; + `cpu_load` was therefore not duplicated here. 
+ +## Task 7 — `dai_list` / `dma_status` + +### Commands + +| Command | Description | +|---|---| +| `sof dai_list` | Iterate `dai_get_device_list()` and print, per DAI, the Zephyr device name, decoded type (ssp/dmic/hda/alh/uaol/sai/esai/...), index, current channel count, sample rate, format and word size, plus per-direction fifo address, fifo depth, DMA handshake id and stream id. | +| `sof dma_status` | List every SOF DMA controller (`dma_info_get()`), with id, channel count, busy count, caps/devs bitmasks, base address and Zephyr device name. | +| `sof dma_status ` | Walk all channels of one controller, calling `sof_dma_get_status()` on each. | +| `sof dma_status ` | Status of a single channel: busy/idle, direction, pending/free bytes, read/write positions, total_copied. | + +### Implementation + +- `dai_list` uses `dai_get_device_list()` from + [src/include/sof/lib/dai-zephyr.h](src/include/sof/lib/dai-zephyr.h) + and the Zephyr DAI API + (`dai_config_get()`, `dai_get_properties()`); it falls back to + TX-only or RX-only `config_get()` when `DAI_DIR_BOTH` is not + supported by a driver. +- `dma_status` walks `sof_get()->dma_info->dma_array[]` (via + `dma_info_get()` from + [zephyr/include/sof/lib/dma.h](zephyr/include/sof/lib/dma.h)) + and calls `sof_dma_get_status()` per channel; this re-uses the + same Zephyr `dma_get_status()` path the DSP itself uses, so the + numbers exactly match runtime audio state. +- Two new Kconfigs (default `y`, both depend on + `ZEPHYR_NATIVE_DRIVERS`): + - `CONFIG_SOF_SHELL_DAI_LIST` + - `CONFIG_SOF_SHELL_DMA_STATUS` +- Shell commands in [zephyr/sof_shell.c](zephyr/sof_shell.c). + +### Notes / follow-ups + +- Read-only on purpose. `dai_trigger`, `dai_loopback`, `dma_stop` + were intentionally not added in this pass — they would corrupt + in-flight streams and require coordination with topology / IPC + state machines. Pair with the existing `pipeline_state` (gated by + `CONFIG_SOF_SHELL_PIPELINE_OPS`) for stream control. 
+- `dma_status` only iterates SOF-registered DMACs. Zephyr also + ships its own `dma` shell when `CONFIG_DMA_SHELL=y`, but that one + walks Zephyr DMA devices and exposes raw register pokes, so the + two are complementary. + +## Task 8 — `kctl_list` + +### Commands + +| Command | Description | +|---|---| +| `sof kctl_list` | Walk every component in the IPC topology and print `comp_id`, `pipeline_id`, `core`, the decoded module name (`volume`, `gain`, `mixin`, `mixout`, `eqiir`, `src`, ...), a coarse `kind` tag for control-bearing modules (`volume` / `mixer` / `blob` / `config`) and the current `comp_state`. | + +### Implementation + +- Module-adapter components all share `SOF_COMP_MODULE_ADAPTER` for + `drv->type`, so the only stable per-module label available in + firmware is the UUID name string from + `cd->drv->tctx->uuid_p->name` (the same name the LDC tool prints). + `kctl_drv_name()` reads that, `kctl_drv_kind()` maps known module + names to a coarse control-family tag. +- New Kconfig (default `y`, depends on `SHELL`): + `CONFIG_SOF_SHELL_KCTL_LIST`. +- Shell command in [zephyr/sof_shell.c](zephyr/sof_shell.c). + +### Notes / follow-ups + +- Read-only on purpose. `kctl_get` / `kctl_set` are intentionally + not implemented in firmware. Control values flow through + per-module IPC4 large_config blobs + (`set_configuration` / `get_configuration` in + [src/include/module/module/interface.h](src/include/module/module/interface.h)), + each with their own `config_id` namespace and TLV layout. + Marshalling that from the shell would essentially duplicate the + host-side tplg / IPC code path. Use `tinymix` / + [sof-ctl](tools/ctl) on the host instead, and pair with + `sof module_status` for raw component state. +- This concludes the documented quick-win list. Future shell + commands should follow the same pattern: small, read-only, + Kconfig-gated, and complementary to (not a replacement for) the + host control plane. 
diff --git a/zephyr/sof_shell.c b/zephyr/sof_shell.c index f10a2c9275b5..36cd5c751fd1 100644 --- a/zephyr/sof_shell.c +++ b/zephyr/sof_shell.c @@ -7,17 +7,70 @@ #include /* sof_get() */ #include +#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if CONFIG_SOF_SHELL_MODULE_LIST +#include +#include +#if CONFIG_LIBRARY_MANAGER +#include +#endif +#endif /* CONFIG_SOF_SHELL_MODULE_LIST */ + +#if CONFIG_SOF_SHELL_PIPELINE_OPS +#include +#include +#include +#include +#endif /* CONFIG_SOF_SHELL_PIPELINE_OPS */ + +#if CONFIG_SOF_SHELL_MMU_DBG +#include +#include +#include +#if CONFIG_XTENSA_MMU +#include +#endif /* CONFIG_XTENSA_MMU */ +#endif /* CONFIG_SOF_SHELL_MMU_DBG */ + +#if CONFIG_SOF_SHELL_LLEXT_LOAD +#include +#include +#include +#endif /* CONFIG_SOF_SHELL_LLEXT_LOAD */ + +#if (CONFIG_SOF_SHELL_LLEXT_LIST || CONFIG_SOF_SHELL_LLEXT_PURGE) && CONFIG_LLEXT +#include +#endif #include #include #include +#if CONFIG_SYS_HEAP_RUNTIME_STATS +#include +#endif #include +#include +#include #define SOF_TEST_INJECT_SCHED_GAP_USEC 1500 -static int cmd_sof_test_inject_sched_gap(const struct shell *sh, +#include +#include +#include + +__cold static int cmd_sof_test_inject_sched_gap(const struct shell *sh, size_t argc, char *argv[]) { uint32_t block_time = SOF_TEST_INJECT_SCHED_GAP_USEC; @@ -42,12 +95,14 @@ static int cmd_sof_test_inject_sched_gap(const struct shell *sh, return 0; } -static int cmd_sof_module_heap_usage(const struct shell *sh, +#if CONFIG_SOF_SHELL_HEAP_USAGE +__cold static int cmd_sof_module_heap_usage(const struct shell *sh, size_t argc, char *argv[]) { struct ipc *ipc = sof_get()->ipc; struct list_item *clist, *_clist; struct ipc_comp_dev *icd; + int count = 0; if (!ipc) { shell_print(sh, "No IPC"); @@ -62,20 +117,2644 @@ static int cmd_sof_module_heap_usage(const struct shell *sh, continue; usage = module_adapter_heap_usage(comp_mod(icd->cd), &hwm); - shell_print(sh, "comp id 0x%08x%9zu 
usage%9zu hwm %9zu max\tbytes", - icd->id, usage, hwm, comp_mod(icd->cd)->priv.cfg.heap_bytes); + shell_print(sh, "comp id 0x%08x%9zu usage%9zu hwm\tbytes", + icd->id, usage, hwm); + count++; } + + if (!count) + shell_print(sh, "No components found. Start an audio stream first."); + return 0; } +#endif /* CONFIG_SOF_SHELL_HEAP_USAGE */ -SHELL_STATIC_SUBCMD_SET_CREATE(sof_commands, - SHELL_CMD(test_inject_sched_gap, NULL, - "Inject a gap to audio scheduling\n", - cmd_sof_test_inject_sched_gap), +#if CONFIG_SOF_SHELL_PIPELINE_STATUS || CONFIG_SOF_SHELL_MODULE_STATUS - SHELL_CMD(module_heap_usage, NULL, - "Print heap memory usage of each module\n", - cmd_sof_module_heap_usage), +__cold_rodata static const char * const comp_state_names[] = { + [COMP_STATE_NOT_EXIST] = "not_exist", + [COMP_STATE_INIT] = "init", + [COMP_STATE_READY] = "ready", + [COMP_STATE_SUSPEND] = "suspend", + [COMP_STATE_PREPARE] = "prepare", + [COMP_STATE_PAUSED] = "paused", + [COMP_STATE_ACTIVE] = "active", + [COMP_STATE_PRE_ACTIVE] = "pre_active", +}; + +__cold static const char *comp_state_str(uint16_t state) +{ + if (state < ARRAY_SIZE(comp_state_names) && comp_state_names[state]) + return comp_state_names[state]; + return "unknown"; +} + +#endif /* CONFIG_SOF_SHELL_PIPELINE_STATUS || CONFIG_SOF_SHELL_MODULE_STATUS */ + +#if CONFIG_SOF_SHELL_PIPELINE_STATUS +__cold static int cmd_sof_pipeline_status(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct ipc *ipc = sof_get()->ipc; + struct list_item *clist; + struct ipc_comp_dev *icd; + int count = 0; + + if (!ipc) { + shell_print(sh, "No IPC"); + return 0; + } + + shell_print(sh, "%-8s %-5s %-8s %-10s %-10s %s", + "ppl_id", "core", "priority", "period_us", "status", "state"); + + list_for_item(clist, &ipc->comp_list) { + struct pipeline *p; + + icd = container_of(clist, struct ipc_comp_dev, list); + if (icd->type != COMP_TYPE_PIPELINE) + continue; + + p = icd->pipeline; + shell_print(sh, "%-8u %-5u %-8u %-10u %-10u %s", + 
p->pipeline_id, p->core, p->priority, + p->period, p->status, + comp_state_str((uint16_t)p->status)); + count++; + } + + if (!count) + shell_print(sh, "No pipelines found."); + + return 0; +} +#endif /* CONFIG_SOF_SHELL_PIPELINE_STATUS */ + +#if CONFIG_SOF_SHELL_MODULE_STATUS +__cold static int cmd_sof_module_status(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct ipc *ipc = sof_get()->ipc; + struct list_item *clist; + struct ipc_comp_dev *icd; + int count = 0; + + if (!ipc) { + shell_print(sh, "No IPC"); + return 0; + } + + shell_print(sh, "%-12s %-8s %-5s %s", + "comp_id", "ppl_id", "core", "state"); + + list_for_item(clist, &ipc->comp_list) { + icd = container_of(clist, struct ipc_comp_dev, list); + if (icd->type != COMP_TYPE_COMPONENT) + continue; + + shell_print(sh, "0x%-10x %-8u %-5u %s", + icd->id, + icd->cd->pipeline ? icd->cd->pipeline->pipeline_id : 0, + icd->core, + comp_state_str(icd->cd->state)); + count++; + } + + if (!count) + shell_print(sh, "No components found. Start an audio stream first."); + + return 0; +} +#endif /* CONFIG_SOF_SHELL_MODULE_STATUS */ + +#if CONFIG_SOF_SHELL_CORE_STATUS +__cold static int cmd_sof_core_status(const struct shell *sh, + size_t argc, char *argv[]) +{ + int i; + + shell_print(sh, "%-6s %-8s %s", "core", "enabled", "current"); + + for (i = 0; i < CONFIG_CORE_COUNT; i++) { + shell_print(sh, "%-6d %-8s %s", + i, + cpu_is_core_enabled(i) ? "yes" : "no", + (i == cpu_get_id()) ? "<--" : ""); + } + + return 0; +} +#endif /* CONFIG_SOF_SHELL_CORE_STATUS */ + +#if CONFIG_SOF_SHELL_CORE_POWER + +/* + * sof core_on + * sof core_off + * + * Power a secondary DSP core on or off. Core 0 (primary) cannot be + * controlled via these commands. 
+ */ +__cold static int cmd_sof_core_on(const struct shell *sh, + size_t argc, char *argv[]) +{ + char *endptr = NULL; + long id; + int ret; + + id = strtol(argv[1], &endptr, 0); + if (endptr == argv[1] || id < 1 || id >= CONFIG_CORE_COUNT) { + shell_error(sh, "core_id must be 1..%d", CONFIG_CORE_COUNT - 1); + return -EINVAL; + } + + if (cpu_is_core_enabled((int)id)) { + shell_print(sh, "core %ld already active", id); + return 0; + } + + ret = cpu_enable_core((int)id); + if (ret) + shell_error(sh, "core_on: failed to enable core %ld: %d", id, ret); + else + shell_print(sh, "core_on: core %ld enabled", id); + + return ret; +} + +__cold static int cmd_sof_core_off(const struct shell *sh, + size_t argc, char *argv[]) +{ + char *endptr = NULL; + long id; + + id = strtol(argv[1], &endptr, 0); + if (endptr == argv[1] || id < 1 || id >= CONFIG_CORE_COUNT) { + shell_error(sh, "core_id must be 1..%d", CONFIG_CORE_COUNT - 1); + return -EINVAL; + } + + if (!cpu_is_core_enabled((int)id)) { + shell_print(sh, "core %ld already inactive", id); + return 0; + } + + cpu_disable_core((int)id); + + if (cpu_is_core_enabled((int)id)) { + shell_error(sh, "core_off: core %ld did not power down", id); + return -EIO; + } + + shell_print(sh, "core_off: core %ld disabled", id); + return 0; +} + +#endif /* CONFIG_SOF_SHELL_CORE_POWER */ + +#if CONFIG_SOF_SHELL_SRAM_STATUS +__cold static int cmd_sof_sram_status(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct k_heap *h = sof_sys_heap_get(); + struct sys_memory_stats stats; + + if (!h) { + shell_print(sh, "Heap not available"); + return 0; + } + + sys_heap_runtime_stats_get(&h->heap, &stats); + + shell_print(sh, "HPSRAM heap (sof_heap):"); + shell_print(sh, " allocated: %zu bytes", stats.allocated_bytes); + shell_print(sh, " free: %zu bytes", stats.free_bytes); + shell_print(sh, " max allocated:%zu bytes", stats.max_allocated_bytes); + shell_print(sh, " total: %zu bytes", + stats.allocated_bytes + stats.free_bytes); + + return 0; 
+} +#endif /* CONFIG_SOF_SHELL_SRAM_STATUS */ + +#if CONFIG_SOF_SHELL_CLOCK_STATUS +__cold static int cmd_sof_clock_status(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct clock_info *clocks = clocks_get(); + int i; + + if (!clocks) { + shell_print(sh, "Clock info not available"); + return 0; + } + + shell_print(sh, "%-6s %-12s %s", "clock", "freq_hz", "freq_mhz"); + + for (i = 0; i < NUM_CLOCKS; i++) { + uint32_t freq = clocks[i].freqs[clocks[i].current_freq_idx].freq; + + shell_print(sh, "%-6d %-12u %.1f", + i, freq, (double)freq / 1000000.0); + } + + return 0; +} +#endif /* CONFIG_SOF_SHELL_CLOCK_STATUS */ + +#if CONFIG_SOF_SHELL_MODULE_LIST + +/* Page size in DSP manifest entries (instance_bss_size, segment lengths) */ +#ifdef CONFIG_MM_DRV_PAGE_SIZE +#define _SHELL_MOD_PAGE_SZ CONFIG_MM_DRV_PAGE_SIZE +#else +#define _SHELL_MOD_PAGE_SZ 4096 +#endif + +#if CONFIG_IPC4_BASE_FW_INTEL +__cold static void print_manifest_modules(const struct shell *sh, + const struct sof_man_fw_desc *desc, + int lib_id) +{ + const struct sof_man_mod_config *cfg_base; + int i; + + if (!desc) + return; + + cfg_base = (const struct sof_man_mod_config *) + ((const uint8_t *)desc + + SOF_MAN_MODULE_OFFSET(desc->header.num_module_entries)); + + for (i = 0; i < (int)desc->header.num_module_entries; i++) { + const struct sof_man_module *mod; + const struct sof_man_mod_config *cfg = NULL; + uint32_t text_sz, bss_sz; + char name[SOF_MAN_MOD_NAME_LEN + 1]; + + mod = (const struct sof_man_module *) + ((const uint8_t *)desc + SOF_MAN_MODULE_OFFSET(i)); + + /* name is not null-terminated in the manifest */ + memcpy(name, mod->name, SOF_MAN_MOD_NAME_LEN); + name[SOF_MAN_MOD_NAME_LEN] = '\0'; + + if (mod->cfg_count > 0) + cfg = cfg_base + mod->cfg_offset; + + text_sz = (uint32_t)mod->segment[0].flags.r.length * _SHELL_MOD_PAGE_SZ; + bss_sz = (uint32_t)mod->instance_bss_size * _SHELL_MOD_PAGE_SZ; + + shell_print(sh, + "[%d:%d] %-8s" + " 
uuid:%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x", + lib_id, i, name, + mod->uuid.a, mod->uuid.b, mod->uuid.c, + mod->uuid.d[0], mod->uuid.d[1], + mod->uuid.d[2], mod->uuid.d[3], + mod->uuid.d[4], mod->uuid.d[5], + mod->uuid.d[6], mod->uuid.d[7]); + shell_print(sh, + " inst_max:%-3u bss/inst:%6u B text:%6u B" + " affinity:0x%02x", + mod->instance_max_count, bss_sz, text_sz, + mod->affinity_mask); + if (cfg) + shell_print(sh, + " cpc:%-8u cps:%-9u ibs:%-6u obs:%u", + cfg->cpc, cfg->cps, cfg->ibs, cfg->obs); + else + shell_print(sh, " cpc:N/A"); + } +} +#endif /* CONFIG_IPC4_BASE_FW_INTEL */ + +__cold static int cmd_sof_module_list(const struct shell *sh, + size_t argc, char *argv[]) +{ +#if CONFIG_IPC4_BASE_FW_INTEL + const struct sof_man_fw_desc *desc; + int total = 0; + + shell_print(sh, "Built-in modules:"); + desc = basefw_vendor_get_manifest(); + if (desc) { + print_manifest_modules(sh, desc, 0); + total += (int)desc->header.num_module_entries; + } else { + shell_print(sh, " (manifest not available)"); + } + +#if CONFIG_LIBRARY_MANAGER + { + int lib_id; + + for (lib_id = 1; lib_id < LIB_MANAGER_MAX_LIBS; lib_id++) { + desc = lib_manager_get_library_manifest( + LIB_MANAGER_PACK_LIB_ID(lib_id)); + if (!desc) + continue; + shell_print(sh, "Library %d modules:", lib_id); + print_manifest_modules(sh, desc, lib_id); + total += (int)desc->header.num_module_entries; + } + } +#endif /* CONFIG_LIBRARY_MANAGER */ + + if (!total) + shell_print(sh, "No modules found."); + +#else /* !CONFIG_IPC4_BASE_FW_INTEL */ + /* Generic fallback: list registered component drivers */ + struct comp_driver_list *drivers = comp_drivers_get(); + struct list_item *clist; + struct comp_driver_info *info; + int count = 0; + + shell_print(sh, "%-5s %-24s %s", "type", "name", "uuid"); + + list_for_item(clist, &drivers->list) { + const struct sof_uuid *uid; + const char *name; + + info = container_of(clist, struct comp_driver_info, list); + uid = info->drv->uid; + name = (info->drv->tctx && 
info->drv->tctx->uuid_p) + ? info->drv->tctx->uuid_p->name : "?"; + + shell_print(sh, + "%-5u %-24s" + " %08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x", + info->drv->type, name, + uid->a, uid->b, uid->c, + uid->d[0], uid->d[1], uid->d[2], uid->d[3], + uid->d[4], uid->d[5], uid->d[6], uid->d[7]); + count++; + } + + if (!count) + shell_print(sh, "No drivers registered."); +#endif /* CONFIG_IPC4_BASE_FW_INTEL */ + + return 0; +} +#endif /* CONFIG_SOF_SHELL_MODULE_LIST */ + +/* parse_long: used by PIPELINE_OPS commands and LLEXT_LOAD; must be outside + * individual feature guards so all callers can see it. + */ +__cold static int parse_long(const struct shell *sh, const char *s, long *out, + long min_val, long max_val) +{ + char *endptr; + long v = strtol(s, &endptr, 0); + + if (endptr == s || v < min_val || v > max_val) { + shell_print(sh, "error: invalid value '%s' (allowed %ld..%ld)", + s, min_val, max_val); + return -EINVAL; + } + *out = v; + return 0; +} + +#if CONFIG_SOF_SHELL_PIPELINE_OPS + +/* + * Resolve a module argument that may be either: + * - a numeric module_id (e.g. "2", "0x02") + * - a module name string (e.g. "COPIER", "copier") — IPC4/Intel only + * + * Returns 0 on success, -EINVAL on failure. 
+ */ +__cold static int parse_module_id(const struct shell *sh, const char *s, + long *module_id) +{ + char *endptr; + long v = strtol(s, &endptr, 0); + + /* Numeric: accepted if the whole string was consumed */ + if (endptr != s && *endptr == '\0') { + if (v < 0 || v > 0xFFFF) { + shell_print(sh, "error: module id 0x%lx out of range", v); + return -EINVAL; + } + *module_id = v; + return 0; + } + +#if CONFIG_IPC4_BASE_FW_INTEL + /* Name lookup: search built-in manifest then loaded libraries */ + { + char upper[SOF_MAN_MOD_NAME_LEN + 1]; + const struct sof_man_fw_desc *desc; + uint32_t i; + int k; + + /* Upper-case the input for case-insensitive compare */ + for (k = 0; k < SOF_MAN_MOD_NAME_LEN && s[k]; k++) + upper[k] = (char)toupper((unsigned char)s[k]); + upper[k] = '\0'; + + desc = basefw_vendor_get_manifest(); + if (desc) { + for (i = 0; i < desc->header.num_module_entries; i++) { + const struct sof_man_module *mod = + (const struct sof_man_module *) + ((const uint8_t *)desc + + SOF_MAN_MODULE_OFFSET(i)); + char mname[SOF_MAN_MOD_NAME_LEN + 1]; + int j; + + for (j = 0; j < SOF_MAN_MOD_NAME_LEN; j++) + mname[j] = (char)toupper( + (unsigned char)mod->name[j]); + mname[SOF_MAN_MOD_NAME_LEN] = '\0'; + + if (!strncmp(upper, mname, + SOF_MAN_MOD_NAME_LEN)) { + *module_id = (long)mod->module_id; + return 0; + } + } + } + +#if CONFIG_LIBRARY_MANAGER + { + int lib_id; + + for (lib_id = 1; lib_id < LIB_MANAGER_MAX_LIBS; + lib_id++) { + uint32_t pack_id = LIB_MANAGER_PACK_LIB_ID( + lib_id); + + desc = lib_manager_get_library_manifest( + pack_id); + if (!desc) + continue; + for (i = 0; + i < desc->header.num_module_entries; + i++) { + const struct sof_man_module *mod = + (const struct sof_man_module *) + ((const uint8_t *)desc + + SOF_MAN_MODULE_OFFSET(i)); + char mname[SOF_MAN_MOD_NAME_LEN + 1]; + int j; + + for (j = 0; + j < SOF_MAN_MOD_NAME_LEN; j++) + mname[j] = (char)toupper( + (unsigned char) + mod->name[j]); + mname[SOF_MAN_MOD_NAME_LEN] = '\0'; + + if 
(!strncmp(upper, mname, + SOF_MAN_MOD_NAME_LEN)) { + *module_id = + (long)mod->module_id; + return 0; + } + } + } + } +#endif /* CONFIG_LIBRARY_MANAGER */ + } +#endif /* CONFIG_IPC4_BASE_FW_INTEL */ + + shell_print(sh, "error: unknown module '%s' (use name or numeric id)", s); + return -EINVAL; +} + +/* sof ppl_create [priority=0] [pages=2] [core=0] [lp=0] */ +__cold static int cmd_sof_ppl_create(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct ipc4_pipeline_create msg = {}; + struct ipc *ipc = sof_get()->ipc; + long ppl_id, priority = 0, pages = 2, core = 0, lp = 0; + int ret; + + if (!ipc) { + shell_print(sh, "No IPC"); + return 0; + } + + if (parse_long(sh, argv[1], &ppl_id, 0, 255) < 0) return -EINVAL; + if (argc > 2 && parse_long(sh, argv[2], &priority, 0, 7) < 0) return -EINVAL; + if (argc > 3 && parse_long(sh, argv[3], &pages, 1, 2047) < 0) return -EINVAL; + if (argc > 4 && parse_long(sh, argv[4], &core, 0, 7) < 0) return -EINVAL; + if (argc > 5 && parse_long(sh, argv[5], &lp, 0, 1) < 0) return -EINVAL; + + msg.primary.r.ppl_mem_size = (uint32_t)pages; + msg.primary.r.ppl_priority = (uint32_t)priority; + msg.primary.r.instance_id = (uint32_t)ppl_id; + msg.primary.r.type = SOF_IPC4_GLB_CREATE_PIPELINE; + msg.extension.r.lp = (uint32_t)lp; + msg.extension.r.core_id = (uint32_t)core; + + ret = ipc_pipeline_new(ipc, (ipc_pipe_new *)&msg); + if (ret < 0) + shell_print(sh, "ppl_create %ld failed: %d", ppl_id, ret); + else + shell_print(sh, "pipeline %ld created (prio=%ld pages=%ld core=%ld lp=%ld)", + ppl_id, priority, pages, core, lp); + return 0; +} + +/* sof ppl_delete */ +__cold static int cmd_sof_ppl_delete(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct ipc *ipc = sof_get()->ipc; + long ppl_id; + int ret; + + if (!ipc) { + shell_print(sh, "No IPC"); + return 0; + } + + if (parse_long(sh, argv[1], &ppl_id, 0, 255) < 0) return -EINVAL; + + ret = ipc_pipeline_free(ipc, (uint32_t)ppl_id); + if (ret < 0) + shell_print(sh, 
"ppl_delete %ld failed: %d", ppl_id, ret); + else + shell_print(sh, "pipeline %ld deleted", ppl_id); + return 0; +} + +/* sof ppl_state */ +__cold static int cmd_sof_ppl_state(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct ipc *ipc = sof_get()->ipc; + struct ipc_comp_dev *ppl_icd; + bool delayed = false; + long ppl_id; + uint32_t cmd; + int ret; + + if (!ipc) { + shell_print(sh, "No IPC"); + return 0; + } + + if (parse_long(sh, argv[1], &ppl_id, 0, 255) < 0) return -EINVAL; + + if (!strcmp(argv[2], "running")) + cmd = SOF_IPC4_PIPELINE_STATE_RUNNING; + else if (!strcmp(argv[2], "paused")) + cmd = SOF_IPC4_PIPELINE_STATE_PAUSED; + else if (!strcmp(argv[2], "reset")) + cmd = SOF_IPC4_PIPELINE_STATE_RESET; + else { + shell_print(sh, "unknown state '%s' (running|paused|reset)", argv[2]); + return -EINVAL; + } + + ppl_icd = ipc_get_comp_by_ppl_id(ipc, COMP_TYPE_PIPELINE, + (uint32_t)ppl_id, IPC_COMP_IGNORE_REMOTE); + if (!ppl_icd) { + shell_print(sh, "pipeline %ld not found", ppl_id); + return 0; + } + + ret = ipc4_pipeline_prepare(ppl_icd, cmd); + if (ret < 0) { + shell_print(sh, "ppl_state %ld prepare failed: %d", ppl_id, ret); + return 0; + } + + ret = ipc4_pipeline_trigger(ppl_icd, cmd, &delayed); + if (ret < 0) + shell_print(sh, "ppl_state %ld trigger failed: %d", ppl_id, ret); + else + shell_print(sh, "pipeline %ld -> %s%s", ppl_id, argv[2], + delayed ? 
" (delayed)" : ""); + return 0; +} + +/* sof mod_init [core=0] [dp=0] */ +__cold static int cmd_sof_mod_init(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct ipc4_module_init_instance msg = {}; + struct comp_dev *dev; + long mod_id, inst_id, ppl_id, core = 0, dp = 0; + + if (parse_module_id(sh, argv[1], &mod_id) < 0) return -EINVAL; + if (parse_long(sh, argv[2], &inst_id, 0, 255) < 0) return -EINVAL; + if (parse_long(sh, argv[3], &ppl_id, 0, 255) < 0) return -EINVAL; + if (argc > 4 && parse_long(sh, argv[4], &core, 0, 7) < 0) return -EINVAL; + if (argc > 5 && parse_long(sh, argv[5], &dp, 0, 1) < 0) return -EINVAL; + + msg.primary.r.module_id = (uint32_t)mod_id; + msg.primary.r.instance_id = (uint32_t)inst_id; + msg.primary.r.type = SOF_IPC4_MOD_INIT_INSTANCE; + msg.primary.r.msg_tgt = SOF_IPC4_MESSAGE_TARGET_MODULE_MSG; + msg.extension.r.ppl_instance_id = (uint32_t)ppl_id; + msg.extension.r.core_id = (uint32_t)core; + msg.extension.r.proc_domain = (uint32_t)dp; + msg.extension.r.param_block_size = 0; + + dev = comp_new_ipc4(&msg); + if (!dev) + shell_print(sh, "mod_init module=0x%lx inst=%ld failed", + mod_id, inst_id); + else + shell_print(sh, + "module 0x%lx inst %ld created in pipeline %ld" + " comp_id=0x%08x", + mod_id, inst_id, ppl_id, + IPC4_COMP_ID((uint32_t)mod_id, (uint32_t)inst_id)); + return 0; +} + +/* sof mod_delete */ +__cold static int cmd_sof_mod_delete(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct ipc *ipc = sof_get()->ipc; + long mod_id, inst_id; + uint32_t comp_id; + int ret; + + if (!ipc) { + shell_print(sh, "No IPC"); + return 0; + } + + if (parse_module_id(sh, argv[1], &mod_id) < 0) return -EINVAL; + if (parse_long(sh, argv[2], &inst_id, 0, 255) < 0) return -EINVAL; + + comp_id = IPC4_COMP_ID((uint32_t)mod_id, (uint32_t)inst_id); + ret = ipc_comp_free(ipc, comp_id); + if (ret < 0) + shell_print(sh, "mod_delete module=0x%lx inst=%ld failed: %d", + mod_id, inst_id, ret); + else + shell_print(sh, "module 0x%lx 
inst %ld deleted", mod_id, inst_id); + return 0; +} + +/* sof mod_bind [sq=0] [dq=0] */ +__cold static int cmd_sof_mod_bind(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct ipc4_module_bind_unbind msg = {}; + struct ipc *ipc = sof_get()->ipc; + long src_mod, src_inst, dst_mod, dst_inst, src_q = 0, dst_q = 0; + int ret; + + if (!ipc) { + shell_print(sh, "No IPC"); + return 0; + } + + if (parse_module_id(sh, argv[1], &src_mod) < 0) return -EINVAL; + if (parse_long(sh, argv[2], &src_inst, 0, 255) < 0) return -EINVAL; + if (parse_module_id(sh, argv[3], &dst_mod) < 0) return -EINVAL; + if (parse_long(sh, argv[4], &dst_inst, 0, 255) < 0) return -EINVAL; + if (argc > 5 && parse_long(sh, argv[5], &src_q, 0, 7) < 0) return -EINVAL; + if (argc > 6 && parse_long(sh, argv[6], &dst_q, 0, 7) < 0) return -EINVAL; + + msg.primary.r.module_id = (uint32_t)src_mod; + msg.primary.r.instance_id = (uint32_t)src_inst; + msg.primary.r.type = SOF_IPC4_MOD_BIND; + msg.primary.r.msg_tgt = SOF_IPC4_MESSAGE_TARGET_MODULE_MSG; + msg.extension.r.dst_module_id = (uint32_t)dst_mod; + msg.extension.r.dst_instance_id = (uint32_t)dst_inst; + msg.extension.r.src_queue = (uint32_t)src_q; + msg.extension.r.dst_queue = (uint32_t)dst_q; + + ret = ipc_comp_connect(ipc, (ipc_pipe_comp_connect *)&msg); + if (ret < 0) + shell_print(sh, "mod_bind failed: %d", ret); + else + shell_print(sh, "bound 0x%lx:%ld[q%ld] -> 0x%lx:%ld[q%ld]", + src_mod, src_inst, src_q, + dst_mod, dst_inst, dst_q); + return 0; +} + +/* sof mod_unbind [sq=0] [dq=0] */ +__cold static int cmd_sof_mod_unbind(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct ipc4_module_bind_unbind msg = {}; + struct ipc *ipc = sof_get()->ipc; + long src_mod, src_inst, dst_mod, dst_inst, src_q = 0, dst_q = 0; + int ret; + + if (!ipc) { + shell_print(sh, "No IPC"); + return 0; + } + + if (parse_module_id(sh, argv[1], &src_mod) < 0) return -EINVAL; + if (parse_long(sh, argv[2], &src_inst, 0, 255) < 0) return -EINVAL; + if 
(parse_module_id(sh, argv[3], &dst_mod) < 0) return -EINVAL; + if (parse_long(sh, argv[4], &dst_inst, 0, 255) < 0) return -EINVAL; + if (argc > 5 && parse_long(sh, argv[5], &src_q, 0, 7) < 0) return -EINVAL; + if (argc > 6 && parse_long(sh, argv[6], &dst_q, 0, 7) < 0) return -EINVAL; + + msg.primary.r.module_id = (uint32_t)src_mod; + msg.primary.r.instance_id = (uint32_t)src_inst; + msg.primary.r.type = SOF_IPC4_MOD_UNBIND; + msg.primary.r.msg_tgt = SOF_IPC4_MESSAGE_TARGET_MODULE_MSG; + msg.extension.r.dst_module_id = (uint32_t)dst_mod; + msg.extension.r.dst_instance_id = (uint32_t)dst_inst; + msg.extension.r.src_queue = (uint32_t)src_q; + msg.extension.r.dst_queue = (uint32_t)dst_q; + + ret = ipc_comp_disconnect(ipc, (ipc_pipe_comp_connect *)&msg); + if (ret < 0) + shell_print(sh, "mod_unbind failed: %d", ret); + else + shell_print(sh, "unbound 0x%lx:%ld[q%ld] -/- 0x%lx:%ld[q%ld]", + src_mod, src_inst, src_q, + dst_mod, dst_inst, dst_q); + return 0; +} + +#endif /* CONFIG_SOF_SHELL_PIPELINE_OPS */ + +#if CONFIG_SOF_SHELL_MMU_DBG + +#if CONFIG_MM_DRV_INTEL_ADSP_MTL_TLB + +/* + * Lightweight wrappers around the Intel ADSP MTL TLB MMIO table. + * Mirrors mm_drv_intel_adsp.h without pulling in the driver-internal header. + */ +#define _SHELL_TLB_NODE DT_NODELABEL(tlb) +#define _SHELL_TLB_BASE ((volatile uint16_t *)(uintptr_t)DT_REG_ADDR(_SHELL_TLB_NODE)) +#define _SHELL_PADDR_SIZE DT_PROP(_SHELL_TLB_NODE, paddr_size) +#define _SHELL_TLB_ENTRY_NUM BIT(_SHELL_PADDR_SIZE) +#define _SHELL_PADDR_MASK (_SHELL_TLB_ENTRY_NUM - 1) +#define _SHELL_ENABLE_BIT ((uint16_t)BIT(_SHELL_PADDR_SIZE)) +#define _SHELL_EXEC_BIT ((uint16_t)BIT(DT_PROP(_SHELL_TLB_NODE, exec_bit_idx))) +#define _SHELL_WRITE_BIT ((uint16_t)BIT(DT_PROP(_SHELL_TLB_NODE, write_bit_idx))) + +/* + * Base physical address for the HPSRAM region (mirrors TLB_PHYS_BASE in the + * driver). Physical pages whose index fits in paddr_size bits are located + * starting here. 
+ */ +#define _SHELL_PHYS_BASE \ + (((CONFIG_KERNEL_VM_BASE / CONFIG_MM_DRV_PAGE_SIZE) & ~_SHELL_PADDR_MASK) * \ + CONFIG_MM_DRV_PAGE_SIZE) + +/* Convert virtual-address index → physical address */ +__cold static uintptr_t shell_tlb_idx_to_pa(uint32_t idx, uint16_t entry) +{ + return _SHELL_PHYS_BASE + + ((uintptr_t)(entry & _SHELL_PADDR_MASK) * CONFIG_MM_DRV_PAGE_SIZE); +} + +/* Decode 16-bit TLB entry permission bits into a short string */ +__cold static void shell_tlb_flags_str(uint16_t entry, char *buf) +{ + buf[0] = 'R'; + buf[1] = (entry & _SHELL_WRITE_BIT) ? 'W' : '-'; + buf[2] = (entry & _SHELL_EXEC_BIT) ? 'X' : '-'; + buf[3] = '\0'; +} + +/* sof mmu_status */ +__cold static int cmd_sof_mmu_status(const struct shell *sh, + size_t argc, char *argv[]) +{ + const struct sys_mm_drv_region *regions, *r; + volatile uint16_t *tlb = _SHELL_TLB_BASE; + uint32_t total = _SHELL_TLB_ENTRY_NUM; + uint32_t enabled = 0; + uint32_t i; + + /* Count active TLB entries */ + for (i = 0; i < total; i++) { + if (tlb[i] & _SHELL_ENABLE_BIT) + enabled++; + } + + shell_print(sh, "Intel ADSP MTL TLB / Virtual Memory Status"); + shell_print(sh, " VM base: 0x%08x", CONFIG_KERNEL_VM_BASE); + shell_print(sh, " VM size: 0x%08x (%u KB)", + (uint32_t)(total * CONFIG_MM_DRV_PAGE_SIZE), + (uint32_t)(total * CONFIG_MM_DRV_PAGE_SIZE / 1024)); + shell_print(sh, " page size: %u B", CONFIG_MM_DRV_PAGE_SIZE); + shell_print(sh, " total entries: %u", total); + shell_print(sh, " mapped pages: %u (%u KB)", + enabled, enabled * CONFIG_MM_DRV_PAGE_SIZE / 1024); + shell_print(sh, " free pages: %u (%u KB)", + total - enabled, + (total - enabled) * CONFIG_MM_DRV_PAGE_SIZE / 1024); + shell_print(sh, " TLB MMIO base: 0x%08x", + (uint32_t)(uintptr_t)_SHELL_TLB_BASE); + shell_print(sh, " paddr_size: %u enable_bit:%u exec_bit:%u write_bit:%u", + _SHELL_PADDR_SIZE, + _SHELL_PADDR_SIZE, + DT_PROP(_SHELL_TLB_NODE, exec_bit_idx), + DT_PROP(_SHELL_TLB_NODE, write_bit_idx)); + + shell_print(sh, ""); + shell_print(sh, 
"Mapped memory regions (sys_mm_drv):"); + shell_print(sh, " %-10s %-10s %s", "address", "size", "attr"); + + regions = sys_mm_drv_query_memory_regions(); + if (regions) { + SYS_MM_DRV_MEMORY_REGION_FOREACH(regions, r) { + shell_print(sh, " 0x%08x 0x%08x 0x%08x", + (uint32_t)(uintptr_t)r->addr, + (uint32_t)r->size, + (uint32_t)r->attr); + } + sys_mm_drv_query_memory_regions_free(regions); + } else { + shell_print(sh, " (not available)"); + } + return 0; +} + +/* sof tlb_dump */ +__cold static int cmd_sof_tlb_dump(const struct shell *sh, + size_t argc, char *argv[]) +{ + volatile uint16_t *tlb = _SHELL_TLB_BASE; + uint32_t total = _SHELL_TLB_ENTRY_NUM; + uint32_t count = 0; + uint32_t i; + + shell_print(sh, " idx vaddr paddr flags entry"); + + for (i = 0; i < total; i++) { + uint16_t entry = tlb[i]; + + if (!(entry & _SHELL_ENABLE_BIT)) + continue; + + uintptr_t vaddr = CONFIG_KERNEL_VM_BASE + + (uintptr_t)i * CONFIG_MM_DRV_PAGE_SIZE; + uintptr_t paddr = shell_tlb_idx_to_pa(i, entry); + char flags[4]; + + shell_tlb_flags_str(entry, flags); + shell_print(sh, " %-5u 0x%08x 0x%08x %s 0x%04x", + i, (uint32_t)vaddr, (uint32_t)paddr, + flags, (uint32_t)entry); + count++; + } + + shell_print(sh, "Total: %u/%u entries active", count, total); + return 0; +} + +/* sof tlb_lookup [end_vaddr] */ +__cold static int cmd_sof_tlb_lookup(const struct shell *sh, + size_t argc, char *argv[]) +{ + volatile uint16_t *tlb = _SHELL_TLB_BASE; + uintptr_t vstart, vend; + + /* Parse start address */ + { + char *ep; + unsigned long v = strtoul(argv[1], &ep, 0); + + if (ep == argv[1]) { + shell_print(sh, "error: invalid address '%s'", argv[1]); + return -EINVAL; + } + vstart = (uintptr_t)v & ~(CONFIG_MM_DRV_PAGE_SIZE - 1); + } + + if (argc > 2) { + char *ep; + unsigned long v = strtoul(argv[2], &ep, 0); + + if (ep == argv[2]) { + shell_print(sh, "error: invalid address '%s'", argv[2]); + return -EINVAL; + } + vend = (uintptr_t)v & ~(CONFIG_MM_DRV_PAGE_SIZE - 1); + } else { + vend = vstart; + } 
+ + if (vend < vstart) + vend = vstart; + + shell_print(sh, " vaddr paddr mapped flags bank entry"); + + for (uintptr_t va = vstart; va <= vend; va += CONFIG_MM_DRV_PAGE_SIZE) { + uintptr_t vm_base = CONFIG_KERNEL_VM_BASE; + uintptr_t vm_end = vm_base + + (uintptr_t)_SHELL_TLB_ENTRY_NUM * + CONFIG_MM_DRV_PAGE_SIZE - 1; + + if (va < vm_base || va > vm_end) { + shell_print(sh, " 0x%08x (outside VM range)", + (uint32_t)va); + continue; + } + + uint32_t idx = (uint32_t)((va - vm_base) / + CONFIG_MM_DRV_PAGE_SIZE); + uint16_t entry = tlb[idx]; + bool mapped = (entry & _SHELL_ENABLE_BIT) != 0; + + if (!mapped) { + shell_print(sh, " 0x%08x (not mapped)", + (uint32_t)va); + continue; + } + + uintptr_t pa = shell_tlb_idx_to_pa(idx, entry); + uint32_t bank = (uint32_t)((pa - _SHELL_PHYS_BASE) / + (128 * 1024)); + char flags[4]; + + shell_tlb_flags_str(entry, flags); + shell_print(sh, " 0x%08x 0x%08x yes %s %-4u 0x%04x", + (uint32_t)va, (uint32_t)pa, + flags, bank, (uint32_t)entry); + } + return 0; +} + +#endif /* CONFIG_MM_DRV_INTEL_ADSP_MTL_TLB */ + +#if CONFIG_XTENSA_MMU + +/* + * Xtensa hardware MMU helpers. 
+ * + * PDTLB result layout (Xtensa ISA §4.6.5): + * bit[4] = HIT + * bits[3:0] = TLB way (valid when HIT) + * + * RDTLB0 result: + * bits[31:12] = Virtual Page Number + * bits[5:4] = ring (0=kernel, 1=unused, 2=user, 3=shared) + * bits[3:0] = CA (cache + access attributes) + * + * RDTLB1 result: + * bits[31:12] = Physical Page Number + * bits[3:0] = CA (same as RDTLB0) + * + * CA bits (from ): + * bit 0 = XTENSA_MMU_PERM_X (executable) + * bit 1 = XTENSA_MMU_PERM_W (writable) + * bit 2 = XTENSA_MMU_CACHED_WB (write-back cache) + * bit 3 = XTENSA_MMU_CACHED_WT (write-through cache) + * bits 2+3 both set = illegal / not-present + */ +#define SHELL_PDTLB_HIT 0x10U /* bit 4 of pdtlb result */ +#define SHELL_PTE_RING_SHIFT 4U +#define SHELL_PTE_RING_MASK 0x30U +#define SHELL_PTE_CA_MASK 0x0FU +#define SHELL_PTE_PPN_MASK 0xFFFFF000U + +static inline uint32_t shell_pdtlb(void *vaddr) +{ + uint32_t r; + __asm__ __volatile__("pdtlb %0, %1\n\t" : "=a"(r) : "a"((uint32_t)vaddr)); + return r; +} + +static inline uint32_t shell_rdtlb0(uint32_t entry) +{ + uint32_t r; + __asm__ volatile("rdtlb0 %0, %1\n\t" : "=a"(r) : "a"(entry)); + return r; +} + +static inline uint32_t shell_rdtlb1(uint32_t entry) +{ + uint32_t r; + __asm__ volatile("rdtlb1 %0, %1\n\t" : "=a"(r) : "a"(entry)); + return r; +} + +static inline uint32_t shell_rsr_rasid(void) +{ + uint32_t r; + __asm__ volatile("rsr %0, rasid" : "=a"(r)); + return r; +} + +/* Decode 4-bit CA cache field into a short string */ +__cold static const char *ca_cache_str(uint32_t ca) +{ + switch (ca & (XTENSA_MMU_CACHED_WB | XTENSA_MMU_CACHED_WT)) { + case 0: return "uncached"; + case XTENSA_MMU_CACHED_WB: return "WB "; + case XTENSA_MMU_CACHED_WT: return "WT "; + default: return "illegal "; + } +} + +__cold_rodata static const char * const ring_name[] = { + "kernel", "unused", "user", "shared" +}; + +/* sof rasid */ +__cold static int cmd_sof_rasid(const struct shell *sh, + size_t argc, char *argv[]) +{ + uint32_t rasid = 
shell_rsr_rasid(); + int ring; + + shell_print(sh, "RASID: 0x%08x", rasid); + for (ring = 0; ring < 4; ring++) { + uint8_t asid = (uint8_t)((rasid >> (ring * 8)) & 0xff); + + shell_print(sh, " ring %d (%s):\tASID 0x%02x", + ring, ring_name[ring], asid); + } + return 0; +} + +/* sof page_info [end_vaddr] */ +__cold static int cmd_sof_page_info(const struct shell *sh, + size_t argc, char *argv[]) +{ + uintptr_t vstart, vend; + uint32_t rasid; + + { + char *ep; + unsigned long v = strtoul(argv[1], &ep, 0); + + if (ep == argv[1]) { + shell_print(sh, "error: invalid address '%s'", argv[1]); + return -EINVAL; + } + vstart = (uintptr_t)v & ~(uintptr_t)(KB(4) - 1); + } + + if (argc > 2) { + char *ep; + unsigned long v = strtoul(argv[2], &ep, 0); + + if (ep == argv[2]) { + shell_print(sh, "error: invalid address '%s'", argv[2]); + return -EINVAL; + } + vend = (uintptr_t)v & ~(uintptr_t)(KB(4) - 1); + } else { + vend = vstart; + } + + if (vend < vstart) + vend = vstart; + + rasid = shell_rsr_rasid(); + shell_print(sh, "RASID: 0x%08x", rasid); + shell_print(sh, " %-12s %-12s ring asid perms cache"); + + for (uintptr_t va = vstart; va <= vend; va += KB(4)) { + uint32_t probe = shell_pdtlb((void *)va); + + if (!(probe & SHELL_PDTLB_HIT)) { + shell_print(sh, + " 0x%08x (DTLB miss — not in TLB cache)", + (uint32_t)va); + continue; + } + + uint32_t pte0 = shell_rdtlb0(probe); + uint32_t pte1 = shell_rdtlb1(probe); + uint32_t ring = (pte0 & SHELL_PTE_RING_MASK) >> SHELL_PTE_RING_SHIFT; + uint32_t ca = pte0 & SHELL_PTE_CA_MASK; + uint32_t paddr = pte1 & SHELL_PTE_PPN_MASK; + uint8_t asid = (uint8_t)((rasid >> (ring * 8)) & 0xff); + char perm[4] = { + 'R', + (ca & XTENSA_MMU_PERM_W) ? 'W' : '-', + (ca & XTENSA_MMU_PERM_X) ? 
'X' : '-', + '\0' + }; + + shell_print(sh, + " 0x%08x 0x%08x %u 0x%02x %s %s", + (uint32_t)va, paddr, + ring, asid, + perm, ca_cache_str(ca)); + } + return 0; +} + +#endif /* CONFIG_XTENSA_MMU */ + +#endif /* CONFIG_SOF_SHELL_MMU_DBG */ + +#if CONFIG_SOF_SHELL_LLEXT_LOAD + +#define SOF_SHELL_LLEXT_TIMEOUT_MSEC 120000U +#define SOF_SHELL_LLEXT_POLL_MSEC 500U + +__cold static int cmd_sof_llext_load(const struct shell *sh, + size_t argc, char *argv[]) +{ + const char *name = argv[1]; + uint32_t lib_id = 1; + volatile struct sof_shell_llext_slot *slot; + uint32_t elapsed = 0; + uint32_t state; + + if (argc > 2) { + long val; + int ret = parse_long(sh, argv[2], &val, 1, LIB_MANAGER_MAX_LIBS - 1); + + if (ret) + return ret; + lib_id = (uint32_t)val; + } + + /* Acquire or reuse the LLEXT_LOAD debug window slot */ +#if CONFIG_INTEL_ADSP_DEBUG_SLOT_MANAGER + { + struct adsp_dw_desc slot_desc = { .type = ADSP_DW_SLOT_LLEXT_LOAD }; + size_t slot_size; + + slot = adsp_dw_request_slot(&slot_desc, &slot_size); + if (!slot) { + shell_error(sh, "Failed to acquire debug window slot"); + return -ENOMEM; + } + } +#else + /* Fall back to a compile-time fixed slot index */ + slot = (volatile struct sof_shell_llext_slot *) + ADSP_DW->slots[CONFIG_SOF_SHELL_LLEXT_LOAD_SLOT_NUM]; + ADSP_DW->descs[CONFIG_SOF_SHELL_LLEXT_LOAD_SLOT_NUM].type = + ADSP_DW_SLOT_LLEXT_LOAD; +#endif + + state = slot->state; + if (state != SOF_SHELL_LLEXT_IDLE) { + shell_error(sh, "llext_load slot busy (state=%u) — try again later", + state); + return -EBUSY; + } + + /* Initialise the shared slot word-by-word (MMIO/uncached region) */ + { + volatile uint32_t *p = (volatile uint32_t *)slot; + size_t nwords = sizeof(struct sof_shell_llext_slot) / sizeof(uint32_t); + + for (size_t i = 0; i < nwords; i++) + p[i] = 0; + } + strncpy((char *)slot->name, name, sizeof(slot->name) - 1); + slot->lib_id = lib_id; + slot->magic = SOF_SHELL_LLEXT_MAGIC; + /* Publish state last so the host only sees REQUESTING once all fields 
are set */ + slot->state = SOF_SHELL_LLEXT_REQUESTING; + + shell_print(sh, "Slot ready: name=%s lib_id=%u timeout=%us", + name, lib_id, SOF_SHELL_LLEXT_TIMEOUT_MSEC / 1000); + shell_print(sh, "On host: dd if= of=/sys/kernel/debug/sof/llext_load bs=$(stat -c%%s ) count=1"); + + /* Poll waiting for the host to finish DMA + library load */ + while (elapsed < SOF_SHELL_LLEXT_TIMEOUT_MSEC) { + k_msleep(SOF_SHELL_LLEXT_POLL_MSEC); + elapsed += SOF_SHELL_LLEXT_POLL_MSEC; + + state = slot->state; + + if (state == SOF_SHELL_LLEXT_DMA_DONE) { + shell_print(sh, + "llext_load OK: lib_id=%u %u bytes transferred", + lib_id, slot->xfer_bytes); + slot->state = SOF_SHELL_LLEXT_IDLE; + return 0; + } + + if (state == SOF_SHELL_LLEXT_ERROR) { + shell_error(sh, "llext_load FAILED: result=%d", slot->result); + slot->state = SOF_SHELL_LLEXT_IDLE; + return (int)slot->result; + } + } + + shell_error(sh, "llext_load timeout after %us", + SOF_SHELL_LLEXT_TIMEOUT_MSEC / 1000); + slot->state = SOF_SHELL_LLEXT_IDLE; + return -ETIMEDOUT; +} + +#endif /* CONFIG_SOF_SHELL_LLEXT_LOAD */ + +#if CONFIG_SOF_SHELL_LLEXT_LIST + +/* + * sof llext_list + * + * Lists all llext libraries currently held in IMR/DRAM. For each library the + * DRAM base address, total storage size and per-module-file SRAM state are + * printed. 
+ * + * Example output: + * llext libs in IMR/DRAM: + * [1] base=0x89000000 size=49152 B modules=1 + * [1:0] TESTER mapped=NO use=0 dep=0 + */ +__cold static int cmd_sof_llext_list(const struct shell *sh, + size_t argc, char *argv[]) +{ + ARG_UNUSED(argc); + ARG_UNUSED(argv); + +#if CONFIG_LIBRARY_MANAGER + struct ext_library *ext_lib = ext_lib_get(); + int found = 0; + int lib_id; + + shell_print(sh, "llext libs in IMR/DRAM:"); + + for (lib_id = 1; lib_id < LIB_MANAGER_MAX_LIBS; lib_id++) { + const struct lib_manager_mod_ctx *ctx = ext_lib->desc[lib_id]; + const struct sof_man_fw_desc *desc; + uint32_t store_bytes; + + if (!ctx || !ctx->base_addr) + continue; + + desc = (const struct sof_man_fw_desc *) + ((const uint8_t *)ctx->base_addr + SOF_MAN_ELF_TEXT_OFFSET); + store_bytes = desc->header.preload_page_count * + (uint32_t)_SHELL_MOD_PAGE_SZ; + + shell_print(sh, "[%d] base=%p size=%u B manifest_mods=%u elf_files=%u", + lib_id, ctx->base_addr, store_bytes, + desc->header.num_module_entries, + ctx->n_mod); + +#if CONFIG_LLEXT + if (ctx->mod) { + unsigned int i; + + for (i = 0; i < ctx->n_mod; i++) { + const struct lib_manager_module *m = ctx->mod + i; + int use = m->llext ? (int)m->llext->use_count : 0; + char name[SOF_MAN_MOD_NAME_LEN + 1]; + const uint8_t *nm; + + if (m->mod_manifest) { + nm = m->mod_manifest->module.name; + } else { + const struct sof_man_module *mm = + (const struct sof_man_module *) + ((const uint8_t *)desc + + SOF_MAN_MODULE_OFFSET(m->start_idx)); + nm = mm->name; + } + memcpy(name, nm, SOF_MAN_MOD_NAME_LEN); + name[SOF_MAN_MOD_NAME_LEN] = '\0'; + + shell_print(sh, + " [%d:%u] %-8s" + " DRAM=yes SRAM=%-3s" + " use=%-2d dep=%u", + lib_id, i, name, + m->mapped ? 
"yes" : "no", + use, + m->n_dependent); + } + } +#endif /* CONFIG_LLEXT */ + + found++; + } + + if (!found) + shell_print(sh, " (none)"); +#else + shell_print(sh, "Library manager not enabled."); +#endif /* CONFIG_LIBRARY_MANAGER */ + return 0; +} + +#endif /* CONFIG_SOF_SHELL_LLEXT_LIST */ + +#if CONFIG_SOF_SHELL_LLEXT_PURGE + +/* + * sof llext_purge + * + * Removes a loadable llext library from IMR/DRAM storage and frees its memory. + * All module files belonging to the library must be unloaded from SRAM first + * (i.e., all pipeline instances using the library must be torn down). + * + * Example: + * uart:~$ sof llext_purge 1 + * llext_purge: lib 1 freed OK + */ +__cold static int cmd_sof_llext_purge(const struct shell *sh, + size_t argc, char *argv[]) +{ +#if CONFIG_LIBRARY_MANAGER + char *endptr = NULL; + long lib_id; + int ret; + + lib_id = strtol(argv[1], &endptr, 0); + if (endptr == argv[1] || lib_id < 1 || lib_id >= LIB_MANAGER_MAX_LIBS) { + shell_error(sh, "lib_id must be 1..%d", LIB_MANAGER_MAX_LIBS - 1); + return -EINVAL; + } + + ret = lib_manager_purge_library((uint32_t)lib_id); + switch (ret) { + case 0: + shell_print(sh, "llext_purge: lib %ld freed OK", lib_id); + break; + case -ENOENT: + shell_error(sh, "llext_purge: lib %ld not loaded", lib_id); + break; + case -EBUSY: + shell_error(sh, "llext_purge: lib %ld still active in SRAM — " + "destroy all pipelines using it first", lib_id); + break; + default: + shell_error(sh, "llext_purge: lib %ld failed: %d", lib_id, ret); + break; + } + return ret; +#else + ARG_UNUSED(argc); + ARG_UNUSED(argv); + shell_print(sh, "Library manager not enabled."); + return -ENOSYS; +#endif /* CONFIG_LIBRARY_MANAGER */ +} + +#endif /* CONFIG_SOF_SHELL_LLEXT_PURGE */ + +static int cmd_sof_version(const struct shell *sh, size_t argc, char *argv[]) +{ + shell_print(sh, "SOF Version: %d.%d.%d-%s (Build %d)", + SOF_MAJOR, SOF_MINOR, SOF_MICRO, SOF_TAG, SOF_BUILD); + shell_print(sh, "Git Tag: %s", SOF_GIT_TAG); + shell_print(sh, 
"Source Hash: 0x%08x", SOF_SRC_HASH); + return 0; +} + +static int cmd_sof_vpage_info(const struct shell *sh, size_t argc, char *argv[]) +{ +#if CONFIG_SOF_VREGIONS + vpage_info(sh); +#else + shell_fprintf(sh, SHELL_NORMAL, "Virtual regions not enabled\n"); +#endif + return 0; +} + +static int cmd_sof_vregion_info(const struct shell *sh, size_t argc, char *argv[]) +{ +#if CONFIG_SOF_VREGIONS + vregion_info_all(sh); +#else + shell_fprintf(sh, SHELL_NORMAL, "Virtual regions not enabled\n"); +#endif + return 0; +} + +static int cmd_sof_pipeline_list(const struct shell *sh, size_t argc, char *argv[]) +{ + struct ipc *ipc = sof_get()->ipc; + struct list_item *clist; + struct ipc_comp_dev *icd; + struct pipeline *p; + + if (!ipc) { + shell_print(sh, "No IPC"); + return 0; + } + + shell_print(sh, "ID Core Status Priority Period"); + list_for_item(clist, &ipc->comp_list) { + icd = container_of(clist, struct ipc_comp_dev, list); + if (icd->type != COMP_TYPE_PIPELINE) + continue; + + p = icd->pipeline; + shell_print(sh, "0x%08x %d %d %d %d", + p->pipeline_id, p->core, p->status, p->priority, p->period); + } + return 0; +} + +static int cmd_sof_ipc_stats(const struct shell *sh, size_t argc, char *argv[]) +{ + struct ipc_stats s; + + if (argc > 1 && !strcmp(argv[1], "reset")) { + ipc_stats_reset(); + shell_print(sh, "ipc stats reset"); + return 0; + } + + ipc_stats_get(&s); + shell_print(sh, "IPC statistics:"); + shell_print(sh, " rx_count : %u", s.rx_count); + shell_print(sh, " rx_errors : %u", s.rx_errors); + shell_print(sh, " tx_count : %u", s.tx_count); + shell_print(sh, " tx_direct_count : %u", s.tx_direct_count); + shell_print(sh, " tx_errors : %u", s.tx_errors); + return 0; +} + +static int cmd_sof_ipc_last(const struct shell *sh, size_t argc, char *argv[]) +{ + struct ipc_stats s; + + ipc_stats_get(&s); + shell_print(sh, "Last IPC RX: pri=0x%08x ext=0x%08x @ %llu cycles", + s.last_rx_pri, s.last_rx_ext, (unsigned long long)s.last_rx_time); + shell_print(sh, "Last IPC 
TX: pri=0x%08x ext=0x%08x @ %llu cycles", + s.last_tx_pri, s.last_tx_ext, (unsigned long long)s.last_tx_time); + return 0; +} + +#if CONFIG_SOF_SHELL_BUFFER_INFO + +static void shell_print_buffer(const struct shell *sh, struct comp_buffer *buf, + uint32_t src_id, uint32_t sink_id) +{ + const struct audio_stream *s = &buf->stream; + uint32_t size = audio_stream_get_size(s); + uint32_t avail = audio_stream_get_avail(s); + uint32_t freeb = audio_stream_get_free(s); + + shell_print(sh, + " buf 0x%08x src 0x%08x -> sink 0x%08x" + " size %u avail %u free %u ch %u rate %u fmt %d", + buf_get_id(buf), src_id, sink_id, + size, avail, freeb, + audio_stream_get_channels(s), + audio_stream_get_rate(s), + (int)audio_stream_get_frm_fmt(s)); +} + +/* + * Walk every component in the IPC topology and visit each downstream + * (bsink_list) buffer once. cb() is called for every (buffer, source, sink) + * tuple. Returns the number of buffers visited. + */ +static int shell_for_each_buffer(struct ipc *ipc, + void (*cb)(const struct shell *sh, + struct comp_buffer *buf, + uint32_t src_id, uint32_t sink_id, + void *ctx), + const struct shell *sh, void *ctx) +{ + struct list_item *clist; + struct ipc_comp_dev *icd; + int count = 0; + + list_for_item(clist, &ipc->comp_list) { + struct comp_dev *cd; + struct comp_buffer *buf; + + icd = container_of(clist, struct ipc_comp_dev, list); + if (icd->type != COMP_TYPE_COMPONENT) + continue; + + cd = icd->cd; + buf = comp_dev_get_first_data_consumer(cd); + while (buf) { + struct comp_dev *sink = comp_buffer_get_sink_component(buf); + + cb(sh, buf, cd->ipc_config.id, + sink ? 
sink->ipc_config.id : 0, ctx); + count++; + buf = comp_dev_get_next_data_consumer(cd, buf); + } + } + + return count; +} + +static void buf_list_cb(const struct shell *sh, struct comp_buffer *buf, + uint32_t src_id, uint32_t sink_id, void *ctx) +{ + ARG_UNUSED(ctx); + shell_print_buffer(sh, buf, src_id, sink_id); +} + +__cold static int cmd_sof_buffer_list(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct ipc *ipc = sof_get()->ipc; + int n; + + if (!ipc) { + shell_print(sh, "No IPC"); + return 0; + } + + shell_print(sh, "Audio buffers:"); + n = shell_for_each_buffer(ipc, buf_list_cb, sh, NULL); + if (!n) + shell_print(sh, " (none)"); + + return 0; +} + +struct buf_find_ctx { + uint32_t want_id; + struct comp_buffer *found; + uint32_t src_id; + uint32_t sink_id; +}; + +static void buf_find_cb(const struct shell *sh, struct comp_buffer *buf, + uint32_t src_id, uint32_t sink_id, void *ctx) +{ + struct buf_find_ctx *c = ctx; + + ARG_UNUSED(sh); + if (c->found) + return; + if (buf_get_id(buf) == c->want_id) { + c->found = buf; + c->src_id = src_id; + c->sink_id = sink_id; + } +} + +__cold static int cmd_sof_buffer_info(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct ipc *ipc = sof_get()->ipc; + struct buf_find_ctx ctx = {0}; + const struct audio_stream *s; + char *endptr = NULL; + long id; + + if (!ipc) { + shell_print(sh, "No IPC"); + return 0; + } + + id = strtol(argv[1], &endptr, 0); + if (endptr == argv[1]) { + shell_error(sh, "buffer_info: invalid id"); + return -EINVAL; + } + + ctx.want_id = (uint32_t)id; + shell_for_each_buffer(ipc, buf_find_cb, sh, &ctx); + + if (!ctx.found) { + shell_print(sh, "buffer 0x%08x not found", (uint32_t)id); + return -ENOENT; + } + + s = &ctx.found->stream; + shell_print(sh, "Buffer 0x%08x:", buf_get_id(ctx.found)); + shell_print(sh, " source comp : 0x%08x", ctx.src_id); + shell_print(sh, " sink comp : 0x%08x", ctx.sink_id); + shell_print(sh, " core : %u", ctx.found->core); + shell_print(sh, " flags : 
0x%08x", ctx.found->flags); + shell_print(sh, " size bytes : %u", audio_stream_get_size(s)); + shell_print(sh, " avail bytes : %u", audio_stream_get_avail(s)); + shell_print(sh, " free bytes : %u", audio_stream_get_free(s)); + shell_print(sh, " rptr : %p", audio_stream_get_rptr(s)); + shell_print(sh, " wptr : %p", audio_stream_get_wptr(s)); + shell_print(sh, " channels : %u", audio_stream_get_channels(s)); + shell_print(sh, " rate : %u", audio_stream_get_rate(s)); + shell_print(sh, " frame fmt : %d", (int)audio_stream_get_frm_fmt(s)); + + return 0; +} + +#endif /* CONFIG_SOF_SHELL_BUFFER_INFO */ + +#if CONFIG_SOF_SHELL_SCHED_INFO + +static const char *sched_type_str(int type) +{ + switch (type) { + case SOF_SCHEDULE_EDF: return "edf"; + case SOF_SCHEDULE_LL_TIMER: return "ll_timer"; + case SOF_SCHEDULE_LL_DMA: return "ll_dma"; + case SOF_SCHEDULE_DP: return "dp"; + case SOF_SCHEDULE_TWB: return "twb"; + default: return "?"; + } +} + +static const char *sched_state_str(enum task_state s) +{ + switch (s) { + case SOF_TASK_STATE_INIT: return "init"; + case SOF_TASK_STATE_QUEUED: return "queued"; + case SOF_TASK_STATE_PENDING: return "pending"; + case SOF_TASK_STATE_RUNNING: return "running"; + case SOF_TASK_STATE_PREEMPTED: return "preempt"; + case SOF_TASK_STATE_COMPLETED: return "done"; + case SOF_TASK_STATE_FREE: return "free"; + case SOF_TASK_STATE_CANCEL: return "cancel"; + case SOF_TASK_STATE_RESCHEDULE: return "resched"; + default: return "?"; + } +} + +struct sched_walk_ctx { + const struct shell *sh; + int sch_type; + uint32_t total_sum; + uint32_t total_cnt; + uint32_t total_max; + int task_count; + bool show_load; +}; + +static void sched_list_cb(struct task *task, void *_ctx) +{ + struct sched_walk_ctx *c = _ctx; + uint32_t avg = task->cycles_cnt ? 
task->cycles_sum / task->cycles_cnt : 0; + + if (c->show_load) { + shell_print(c->sh, + " %-9s core %u prio %3u state %-7s" + " count %u avg %u max %u sum %u cyc", + sched_type_str(c->sch_type), task->core, + task->priority, sched_state_str(task->state), + task->cycles_cnt, avg, task->cycles_max, + task->cycles_sum); + } else { + shell_print(c->sh, + " %-9s core %u prio %3u state %-7s" + " flags 0x%04x uid %p data %p", + sched_type_str(c->sch_type), task->core, + task->priority, sched_state_str(task->state), + task->flags, (const void *)task->uid, task->data); + } + + c->total_sum += task->cycles_sum; + c->total_cnt += task->cycles_cnt; + if (task->cycles_max > c->total_max) + c->total_max = task->cycles_max; + c->task_count++; +} + +static int sched_walk(const struct shell *sh, bool show_load) +{ + struct schedulers *schedulers = *arch_schedulers_get(); + struct sched_walk_ctx ctx = { .sh = sh, .show_load = show_load }; + struct schedule_data *sch; + struct list_item *slist; + + if (!schedulers) { + shell_print(sh, "No schedulers registered"); + return 0; + } + + list_for_item(slist, &schedulers->list) { + sch = container_of(slist, struct schedule_data, list); + if (!sch->ops->scheduler_dump_tasks) + continue; + ctx.sch_type = sch->type; + sch->ops->scheduler_dump_tasks(sch->data, sched_list_cb, &ctx); + } + + if (!ctx.task_count) + shell_print(sh, " (no tasks)"); + + if (show_load) { + uint32_t avg = ctx.total_cnt ? 
ctx.total_sum / ctx.total_cnt : 0; + + shell_print(sh, + "Total: %d tasks count %u avg %u peak max %u cyc", + ctx.task_count, ctx.total_cnt, avg, ctx.total_max); + } + + return 0; +} + +__cold static int cmd_sof_sched_tasks(const struct shell *sh, + size_t argc, char *argv[]) +{ + shell_print(sh, "Active scheduler tasks:"); + return sched_walk(sh, false); +} + +__cold static int cmd_sof_sched_load(const struct shell *sh, + size_t argc, char *argv[]) +{ + shell_print(sh, "Scheduler task cycle counters:"); + return sched_walk(sh, true); +} + +#endif /* CONFIG_SOF_SHELL_SCHED_INFO */ + +#if CONFIG_SOF_SHELL_LOG_INFO + +#include +#include + +__cold static int cmd_sof_log_status(const struct shell *sh, + size_t argc, char *argv[]) +{ + int n = log_backend_count_get(); + int i; + + shell_print(sh, "Log backends: %d, sources: %u", + n, log_src_cnt_get(Z_LOG_LOCAL_DOMAIN_ID)); + shell_print(sh, " idx id active name"); + + for (i = 0; i < n; i++) { + const struct log_backend *be = log_backend_get(i); + + if (!be) + continue; + shell_print(sh, " %3d %3u %-3s %s", + i, log_backend_id_get(be), + log_backend_is_active(be) ? "yes" : "no", + be->name ? 
be->name : "?"); + } + + return 0; +} + +#endif /* CONFIG_SOF_SHELL_LOG_INFO */ + +#if CONFIG_SOF_SHELL_MTRACE_DUMP + +#include + +/* must match the layout used by zephyr/subsys/logging/backends/log_backend_adsp_mtrace.c */ +struct sof_shell_mtrace_slot { + uint32_t host_ptr; + uint32_t dsp_ptr; + uint8_t data[ADSP_DW_SLOT_SIZE - 2 * sizeof(uint32_t)]; +} __packed; + +#define SOF_SHELL_MTRACE_BUF_SIZE (ADSP_DW_SLOT_SIZE - 2 * sizeof(uint32_t)) +#define SOF_SHELL_MTRACE_TYPE(core) \ + (ADSP_DW_SLOT_DEBUG_LOG | ((core) & ADSP_DW_SLOT_CORE_MASK)) + +__cold static int cmd_sof_mtrace_dump(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct sof_shell_mtrace_slot *slot; + uint32_t r, w, len, i; + +#ifdef CONFIG_INTEL_ADSP_DEBUG_SLOT_MANAGER + struct adsp_dw_desc desc = { .type = SOF_SHELL_MTRACE_TYPE(0) }; + + slot = adsp_dw_request_slot(&desc, NULL); +#else + slot = (struct sof_shell_mtrace_slot *) + ADSP_DW->slots[ADSP_DW_SLOT_NUM_MTRACE]; +#endif + if (!slot) { + shell_print(sh, "mtrace slot not available"); + return -ENODEV; + } + + r = slot->host_ptr; + w = slot->dsp_ptr; + + if (r == w) { + shell_print(sh, "mtrace: empty (host_ptr=dsp_ptr=%u)", r); + return 0; + } + + if (w > r) + len = w - r; + else + len = SOF_SHELL_MTRACE_BUF_SIZE - r + w; + + shell_print(sh, + "mtrace: host_ptr=%u dsp_ptr=%u unread=%u bytes (snapshot)", + r, w, len); + + /* print byte-by-byte without advancing host_ptr; preserves host consumer */ + for (i = 0; i < len; i++) { + uint32_t off = (r + i) % SOF_SHELL_MTRACE_BUF_SIZE; + + shell_fprintf(sh, SHELL_NORMAL, "%c", slot->data[off]); + } + shell_fprintf(sh, SHELL_NORMAL, "\n"); + + return 0; +} + +#endif /* CONFIG_SOF_SHELL_MTRACE_DUMP */ + +#if CONFIG_SOF_SHELL_MAILBOX_HEX || CONFIG_SOF_SHELL_DBGWIN_DUMP + +static void sof_shell_hex_dump(const struct shell *sh, uintptr_t base, + size_t off, size_t len) +{ + const uint8_t *p = (const uint8_t *)(base + off); + size_t i, j; + + for (i = 0; i < len; i += 16) { + size_t row = 
MIN((size_t)16, len - i); + char ascii[17]; + + shell_fprintf(sh, SHELL_NORMAL, "%08lx ", + (unsigned long)(off + i)); + for (j = 0; j < 16; j++) { + if (j < row) + shell_fprintf(sh, SHELL_NORMAL, " %02x", p[i + j]); + else + shell_fprintf(sh, SHELL_NORMAL, " "); + ascii[j] = (j < row && p[i + j] >= 0x20 && p[i + j] < 0x7f) ? + (char)p[i + j] : '.'; + } + ascii[16] = '\0'; + shell_fprintf(sh, SHELL_NORMAL, " %s\n", ascii); + } +} + +#endif + +#if CONFIG_SOF_SHELL_MAILBOX_HEX + +#include + +struct sof_shell_mb_region { + const char *name; + uintptr_t base; + size_t size; +}; + +static const struct sof_shell_mb_region sof_shell_mb_regions[] = { + { "exception", MAILBOX_EXCEPTION_BASE, MAILBOX_EXCEPTION_SIZE }, + { "dspbox", MAILBOX_DSPBOX_BASE, MAILBOX_DSPBOX_SIZE }, + { "hostbox", MAILBOX_HOSTBOX_BASE, MAILBOX_HOSTBOX_SIZE }, + { "debug", MAILBOX_DEBUG_BASE, MAILBOX_DEBUG_SIZE }, +}; + +__cold static int cmd_sof_mailbox_hex(const struct shell *sh, + size_t argc, char *argv[]) +{ + const struct sof_shell_mb_region *r = NULL; + size_t off = 0, len; + char *end = NULL; + int i; + + if (argc < 2) { + shell_print(sh, "Mailbox regions:"); + for (i = 0; i < ARRAY_SIZE(sof_shell_mb_regions); i++) + shell_print(sh, " %-10s base 0x%08lx size %zu", + sof_shell_mb_regions[i].name, + (unsigned long)sof_shell_mb_regions[i].base, + sof_shell_mb_regions[i].size); + shell_print(sh, "Usage: sof mailbox_hex [offset] [length]"); + return 0; + } + + for (i = 0; i < ARRAY_SIZE(sof_shell_mb_regions); i++) { + if (!strcmp(argv[1], sof_shell_mb_regions[i].name)) { + r = &sof_shell_mb_regions[i]; + break; + } + } + if (!r) { + shell_print(sh, "Unknown region '%s'", argv[1]); + return -EINVAL; + } + + if (argc > 2) { + off = strtoul(argv[2], &end, 0); + if (end == argv[2] || off >= r->size) { + shell_print(sh, "Bad offset (max 0x%zx)", r->size); + return -EINVAL; + } + } + + len = MIN((size_t)256, r->size - off); + if (argc > 3) { + len = strtoul(argv[3], &end, 0); + if (end == argv[3]) + 
return -EINVAL; + len = MIN(len, r->size - off); + } + + shell_print(sh, "%s @ 0x%08lx + 0x%zx, %zu bytes:", + r->name, (unsigned long)r->base, off, len); + sof_shell_hex_dump(sh, r->base, off, len); + return 0; +} + +#endif /* CONFIG_SOF_SHELL_MAILBOX_HEX */ + +#if CONFIG_SOF_SHELL_DBGWIN_DUMP + +#include +#include + +/* Mirror struct used by zephyr/soc/intel/intel_adsp/common/debug_window.c. + * We map window 2 directly so we can read descriptors and slot data without + * depending on slot-manager internals. + */ +struct sof_shell_dw { + struct adsp_dw_desc descs[ADSP_DW_DESC_COUNT]; + uint8_t reserved[ADSP_DW_PAGE0_SLOT_OFFSET - + ADSP_DW_DESC_COUNT * sizeof(struct adsp_dw_desc)]; + uint8_t partial_page0[ADSP_DW_SLOT_SIZE - ADSP_DW_PAGE0_SLOT_OFFSET]; + uint8_t slots[ADSP_DW_SLOT_COUNT][ADSP_DW_SLOT_SIZE]; +} __packed; + +#define SOF_SHELL_DW_BASE \ + (DT_REG_ADDR(DT_PHANDLE(DT_NODELABEL(mem_window2), memory)) + WIN2_OFFSET) + +static const char *dw_type_name(uint32_t type) +{ + switch (type & ADSP_DW_SLOT_TYPE_MASK) { + case ADSP_DW_SLOT_UNUSED & ADSP_DW_SLOT_TYPE_MASK: + return type ? "?" 
: "unused"; + case ADSP_DW_SLOT_CRITICAL_LOG & ADSP_DW_SLOT_TYPE_MASK: + return "critical_log"; + case ADSP_DW_SLOT_DEBUG_LOG & ADSP_DW_SLOT_TYPE_MASK: + return "debug_log"; + case ADSP_DW_SLOT_GDB_STUB & ADSP_DW_SLOT_TYPE_MASK: + return "gdb_stub"; + case ADSP_DW_SLOT_TELEMETRY & ADSP_DW_SLOT_TYPE_MASK: + return "telemetry"; + case ADSP_DW_SLOT_TRACE & ADSP_DW_SLOT_TYPE_MASK: + return "trace"; + case ADSP_DW_SLOT_SHELL & ADSP_DW_SLOT_TYPE_MASK: + return "shell"; + case ADSP_DW_SLOT_DEBUG_STREAM & ADSP_DW_SLOT_TYPE_MASK: + return "debug_stream"; + case ADSP_DW_SLOT_BROKEN & ADSP_DW_SLOT_TYPE_MASK: + return "broken"; + default: + return "?"; + } +} + +__cold static int cmd_sof_dbgwin_dump(const struct shell *sh, + size_t argc, char *argv[]) +{ + volatile struct sof_shell_dw *dw = + (volatile struct sof_shell_dw *) + sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *) + SOF_SHELL_DW_BASE); + int slot, i; + size_t len = 256; + char *end = NULL; + + if (argc < 2) { + shell_print(sh, + "ADSP debug window @ 0x%08lx (%d slots, %u bytes each)", + (unsigned long)SOF_SHELL_DW_BASE, + ADSP_DW_SLOT_COUNT, ADSP_DW_SLOT_SIZE); + shell_print(sh, " slot res_id type vma name"); + for (i = 0; i < ADSP_DW_SLOT_COUNT; i++) { + shell_print(sh, + " %3d 0x%08x 0x%08x 0x%08x %s (core %u)", + i, dw->descs[i].resource_id, dw->descs[i].type, + dw->descs[i].vma, dw_type_name(dw->descs[i].type), + dw->descs[i].type & ADSP_DW_SLOT_CORE_MASK); + } + shell_print(sh, "Usage: sof dbgwin_dump [length]"); + return 0; + } + + slot = strtol(argv[1], &end, 0); + if (end == argv[1] || slot < 0 || slot >= ADSP_DW_SLOT_COUNT) { + shell_print(sh, "Bad slot (0..%d)", ADSP_DW_SLOT_COUNT - 1); + return -EINVAL; + } + + if (argc > 2) { + len = strtoul(argv[2], &end, 0); + if (end == argv[2]) + return -EINVAL; + } + len = MIN(len, (size_t)ADSP_DW_SLOT_SIZE); + + shell_print(sh, "Slot %d type=0x%08x (%s, core %u) vma=0x%08x; %zu bytes:", + slot, dw->descs[slot].type, 
dw_type_name(dw->descs[slot].type), + dw->descs[slot].type & ADSP_DW_SLOT_CORE_MASK, + dw->descs[slot].vma, len); + sof_shell_hex_dump(sh, (uintptr_t)dw->slots[slot], 0, len); + return 0; +} + +#endif /* CONFIG_SOF_SHELL_DBGWIN_DUMP */ + +#if CONFIG_SOF_SHELL_PERF_STATUS + +#include +#include +#include + +static const char *perf_state_str(enum ipc4_perf_measurements_state_set s) +{ + switch (s) { + case IPC4_PERF_MEASUREMENTS_DISABLED: return "disabled"; + case IPC4_PERF_MEASUREMENTS_STOPPED: return "stopped"; + case IPC4_PERF_MEASUREMENTS_STARTED: return "started"; + case IPC4_PERF_MEASUREMENTS_PAUSED: return "paused"; + default: return "?"; + } +} + +__cold static int cmd_sof_perf_status(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct system_tick_info *systick; + int core_id, ret; + + if (argc > 1) { + if (!strcmp(argv[1], "reset")) { + ret = reset_performance_counters(); + shell_print(sh, "perf: reset_performance_counters() = %d", ret); + return ret; + } + if (!strcmp(argv[1], "start")) { + ret = enable_performance_counters(); + if (!ret) + perf_meas_set_state(IPC4_PERF_MEASUREMENTS_STARTED); + shell_print(sh, "perf: enable_performance_counters() = %d", ret); + return ret; + } + if (!strcmp(argv[1], "stop")) { + perf_meas_set_state(IPC4_PERF_MEASUREMENTS_STOPPED); + shell_print(sh, "perf: stopped"); + return 0; + } + if (!strcmp(argv[1], "pause")) { + perf_meas_set_state(IPC4_PERF_MEASUREMENTS_PAUSED); + shell_print(sh, "perf: paused"); + return 0; + } + shell_print(sh, "Usage: sof perf_status [reset|start|stop|pause]"); + return -EINVAL; + } + + shell_print(sh, "Performance measurements: %s", + perf_state_str(perf_meas_get_state())); + +#ifdef CONFIG_INTEL_ADSP_DEBUG_SLOT_MANAGER + systick = telemetry_get_systick_info_ptr(); + if (!systick) { + shell_print(sh, "telemetry slot not allocated"); + return 0; + } +#else + { + struct telemetry_wnd_data *wnd = + (struct telemetry_wnd_data *) + ADSP_DW->slots[SOF_DW_TELEMETRY_SLOT]; + systick = (struct 
system_tick_info *)wnd->system_tick_info; + } +#endif + + shell_print(sh, "Per-core systick (count, last_us_cyc, max_us_cyc, avg_kcps, peak_kcps):"); + for (core_id = 0; core_id < CONFIG_MAX_CORE_COUNT; core_id++) { + if (!(cpu_enabled_cores() & BIT(core_id))) + continue; + shell_print(sh, + " core %u: count=%u last=%u max=%u avg_kcps=%u peak_kcps=%u peak4k=%u peak8k=%u", + core_id, + systick[core_id].count, + systick[core_id].last_time_elapsed, + systick[core_id].max_time_elapsed, + systick[core_id].avg_utilization, + systick[core_id].peak_utilization, + systick[core_id].peak_utilization_4k, + systick[core_id].peak_utilization_8k); + } + + return 0; +} + +#endif /* CONFIG_SOF_SHELL_PERF_STATUS */ + +#if CONFIG_SOF_SHELL_DAI_LIST || CONFIG_SOF_SHELL_DMA_STATUS +#include +#include +#include +#endif + +#if CONFIG_SOF_SHELL_DAI_LIST + +static const char *zephyr_dai_type_str(int t) +{ + switch (t) { + case DAI_LEGACY_I2S: return "i2s"; + case DAI_INTEL_SSP: return "ssp"; + case DAI_INTEL_DMIC: return "dmic"; + case DAI_INTEL_HDA: return "hda"; + case DAI_INTEL_ALH: return "alh"; + case DAI_IMX_SAI: return "sai"; + case DAI_IMX_ESAI: return "esai"; + case DAI_AMD_BT: return "amd_bt"; + case DAI_AMD_SP: return "amd_sp"; + case DAI_AMD_DMIC: return "amd_dmic"; + case DAI_MEDIATEK_AFE: return "mtk_afe"; + case DAI_INTEL_SSP_NHLT: return "ssp_nhlt"; + case DAI_INTEL_DMIC_NHLT: return "dmic_nhlt"; + case DAI_INTEL_HDA_NHLT: return "hda_nhlt"; + case DAI_INTEL_ALH_NHLT: return "alh_nhlt"; + case DAI_IMX_MICFIL: return "micfil"; + case DAI_INTEL_UAOL: return "uaol"; + case DAI_AMD_SDW: return "amd_sdw"; + default: return "?"; + } +} + +__cold static int cmd_sof_dai_list(const struct shell *sh, + size_t argc, char *argv[]) +{ + const struct device **list; + size_t count = 0; + int i; + + list = dai_get_device_list(&count); + if (!list || !count) { + shell_print(sh, "No DAIs registered"); + return 0; + } + + shell_print(sh, "%zu DAI(s) registered:", count); + shell_print(sh, " 
idx name type index channels rate fmt word"); + for (i = 0; i < count; i++) { + const struct device *dev = list[i]; + struct dai_config cfg = {0}; + const struct dai_properties *props; + + if (dai_config_get(dev, &cfg, DAI_DIR_BOTH)) { + /* try TX-only then RX-only */ + if (dai_config_get(dev, &cfg, DAI_DIR_TX) && + dai_config_get(dev, &cfg, DAI_DIR_RX)) { + shell_print(sh, " %3d %-26s (config_get failed)", + i, dev->name ? dev->name : "?"); + continue; + } + } + + shell_print(sh, + " %3d %-26s %-10s %5u %8u %7u 0x%04x %4u", + i, dev->name ? dev->name : "?", + zephyr_dai_type_str(cfg.type), cfg.dai_index, + cfg.channels, cfg.rate, cfg.format, cfg.word_size); + + props = dai_get_properties(dev, DAI_DIR_TX, 0); + if (props) + shell_print(sh, + " TX: fifo 0x%08x depth %u hs %u stream %d", + props->fifo_address, props->fifo_depth, + props->dma_hs_id, props->stream_id); + props = dai_get_properties(dev, DAI_DIR_RX, 0); + if (props) + shell_print(sh, + " RX: fifo 0x%08x depth %u hs %u stream %d", + props->fifo_address, props->fifo_depth, + props->dma_hs_id, props->stream_id); + } + + return 0; +} + +#endif /* CONFIG_SOF_SHELL_DAI_LIST */ + +#if CONFIG_SOF_SHELL_DMA_STATUS + +static const char *dma_dir_str(enum dma_channel_direction d) +{ + switch (d) { + case MEMORY_TO_MEMORY: return "M2M"; + case MEMORY_TO_PERIPHERAL: return "M2P"; + case PERIPHERAL_TO_MEMORY: return "P2M"; + case PERIPHERAL_TO_PERIPHERAL: return "P2P"; + case HOST_TO_MEMORY: return "H2M"; + case MEMORY_TO_HOST: return "M2H"; + default: return "?"; + } +} + +static void dma_print_one(const struct shell *sh, struct sof_dma *dma, + int dma_idx, int chan) +{ + struct dma_status st = {0}; + int ret = sof_dma_get_status(dma, chan, &st); + + if (ret) { + shell_print(sh, " dma %d ch %d: get_status -> %d", + dma_idx, chan, ret); + return; + } + shell_print(sh, + " dma %d ch %d: %s dir=%s pending=%u free=%u rd=%u wr=%u total=%llu", + dma_idx, chan, st.busy ? 
"BUSY" : "idle", + dma_dir_str(st.dir), st.pending_length, st.free, + st.read_position, st.write_position, + (unsigned long long)st.total_copied); +} + +__cold static int cmd_sof_dma_status(const struct shell *sh, + size_t argc, char *argv[]) +{ + const struct dma_info *info = dma_info_get(); + struct sof_dma *dma; + int i, ch; + + if (!info || !info->num_dmas) { + shell_print(sh, "No DMA controllers registered"); + return 0; + } + + if (argc == 1) { + shell_print(sh, "%zu DMA controller(s):", info->num_dmas); + shell_print(sh, " idx id channels busy caps devs base"); + for (i = 0; i < info->num_dmas; i++) { + dma = &info->dma_array[i]; + shell_print(sh, + " %3d %2u %8u %4u 0x%04x 0x%04x 0x%08x (%s)", + i, dma->plat_data.id, + dma->plat_data.channels, + (unsigned int)atomic_get(&dma->num_channels_busy), + dma->plat_data.caps, + dma->plat_data.devs, + dma->plat_data.base, + dma->z_dev && dma->z_dev->name ? + dma->z_dev->name : "?"); + } + shell_print(sh, + "Usage: sof dma_status [chan] (omit chan to walk all)"); + return 0; + } + + { + char *end = NULL; + long idx = strtol(argv[1], &end, 0); + + if (end == argv[1] || idx < 0 || idx >= (long)info->num_dmas) { + shell_print(sh, "Bad DMA index (0..%zu)", + info->num_dmas - 1); + return -EINVAL; + } + dma = &info->dma_array[idx]; + + if (argc > 2) { + ch = strtol(argv[2], &end, 0); + if (end == argv[2] || ch < 0 || + ch >= (int)dma->plat_data.channels) { + shell_print(sh, "Bad channel (0..%u)", + dma->plat_data.channels - 1); + return -EINVAL; + } + dma_print_one(sh, dma, (int)idx, ch); + return 0; + } + + shell_print(sh, "DMA %ld (%s): %u channels", + idx, dma->z_dev && dma->z_dev->name ? + dma->z_dev->name : "?", + dma->plat_data.channels); + for (ch = 0; ch < (int)dma->plat_data.channels; ch++) + dma_print_one(sh, dma, (int)idx, ch); + } + + return 0; +} + +#endif /* CONFIG_SOF_SHELL_DMA_STATUS */ + +#if CONFIG_SOF_SHELL_KCTL_LIST + +/* + * Best-effort decoded driver name. 
Module-adapter components share + * SOF_COMP_MODULE_ADAPTER for drv->type, so the only stable per-module + * label available in firmware is the UUID name string from the trace + * context (which carries the same name printed by the LDC tool). + */ +static const char *kctl_drv_name(const struct comp_dev *cd) +{ + if (cd && cd->drv && cd->drv->tctx && cd->drv->tctx->uuid_p && + cd->drv->tctx->uuid_p->name[0]) + return cd->drv->tctx->uuid_p->name; + return "?"; +} + +/* + * Tag the modules that are known to expose ALSA-style kcontrols + * (volume / gain / mixer-style switches and enums). This is purely a + * UI hint -- the actual control values live behind per-module + * config_id blobs that need IPC4 large_config marshalling, which is + * intentionally out of scope here (see shell.md). + */ +static const char *kctl_drv_kind(const char *name) +{ + if (!name) + return ""; + if (!strcmp(name, "volume") || !strcmp(name, "gain")) + return "volume"; + if (!strcmp(name, "mixin") || !strcmp(name, "mixout") || + !strcmp(name, "mixer")) + return "mixer"; + if (!strcmp(name, "eqiir") || !strcmp(name, "eqfir") || + !strcmp(name, "drc") || !strcmp(name, "multiband_drc") || + !strcmp(name, "dcblock") || !strcmp(name, "tdfb") || + !strcmp(name, "crossover") || !strcmp(name, "google_rtc_audio_processing")) + return "blob"; + if (!strcmp(name, "selector") || !strcmp(name, "src") || + !strcmp(name, "asrc")) + return "config"; + return ""; +} + +__cold static int cmd_sof_kctl_list(const struct shell *sh, + size_t argc, char *argv[]) +{ + struct ipc *ipc = sof_get()->ipc; + struct list_item *clist; + struct ipc_comp_dev *icd; + int count = 0; + + ARG_UNUSED(argc); + ARG_UNUSED(argv); + + if (!ipc) { + shell_print(sh, "No IPC"); + return 0; + } + + shell_print(sh, "%-12s %-8s %-5s %-24s %-8s %s", + "comp_id", "ppl_id", "core", "module", "kind", "state"); + + list_for_item(clist, &ipc->comp_list) { + const struct comp_dev *cd; + const char *name; + + icd = container_of(clist, struct 
ipc_comp_dev, list); + if (icd->type != COMP_TYPE_COMPONENT) + continue; + + cd = icd->cd; + name = kctl_drv_name(cd); + + shell_print(sh, "0x%-10x %-8u %-5u %-24s %-8s %s", + icd->id, + cd->pipeline ? cd->pipeline->pipeline_id : 0, + icd->core, name, kctl_drv_kind(name), + comp_state_str(cd->state)); + count++; + } + + if (!count) { + shell_print(sh, + "No components found. Start an audio stream first."); + return 0; + } + + shell_print(sh, ""); + shell_print(sh, + "kctl get/set is intentionally not exposed here -- control"); + shell_print(sh, + "values flow through per-module IPC4 large_config blobs"); + shell_print(sh, + "(set_configuration / get_configuration). Use tinymix /"); + shell_print(sh, + "sof-ctl on the host, or 'sof module_status' for raw state."); + + return 0; +} + +#endif /* CONFIG_SOF_SHELL_KCTL_LIST */ + +SHELL_STATIC_SUBCMD_SET_CREATE(sof_commands, + SHELL_CMD(test_inject_sched_gap, NULL, + "Inject a gap to audio scheduling\n", + cmd_sof_test_inject_sched_gap), + +#if CONFIG_SOF_SHELL_HEAP_USAGE + SHELL_CMD(module_heap_usage, NULL, + "Print heap memory usage of each module\n", + cmd_sof_module_heap_usage), +#endif + +#if CONFIG_SOF_SHELL_PIPELINE_STATUS + SHELL_CMD(pipeline_status, NULL, + "Print status of all active pipelines\n", + cmd_sof_pipeline_status), +#endif + +#if CONFIG_SOF_SHELL_MODULE_STATUS + SHELL_CMD(module_status, NULL, + "Print status of all active components\n", + cmd_sof_module_status), +#endif + +#if CONFIG_SOF_SHELL_CORE_STATUS + SHELL_CMD(core_status, NULL, + "Print enabled/active state of each DSP core\n", + cmd_sof_core_status), +#endif + +#if CONFIG_SOF_SHELL_CORE_POWER + SHELL_CMD_ARG(core_on, NULL, + "Power on a secondary DSP core: \n" + "core_id must be 1..CONFIG_CORE_COUNT-1 (core 0 is primary).\n", + cmd_sof_core_on, 2, 0), + SHELL_CMD_ARG(core_off, NULL, + "Power off a secondary DSP core: \n" + "core_id must be 1..CONFIG_CORE_COUNT-1 (core 0 is primary).\n", + cmd_sof_core_off, 2, 0), +#endif + +#if 
CONFIG_SOF_SHELL_SRAM_STATUS + SHELL_CMD(sram_status, NULL, + "Print HPSRAM heap usage statistics\n", + cmd_sof_sram_status), +#endif + +#if CONFIG_SOF_SHELL_CLOCK_STATUS + SHELL_CMD(clock_status, NULL, + "Print current clock frequency for each DSP core\n", + cmd_sof_clock_status), +#endif + +#if CONFIG_SOF_SHELL_MODULE_LIST + SHELL_CMD(module_list, NULL, + "List all available modules with name, memory, size and RTC info\n", + cmd_sof_module_list), +#endif + +#if CONFIG_SOF_SHELL_PIPELINE_OPS + SHELL_CMD_ARG(ppl_create, NULL, + "Create IPC4 pipeline: [priority=0] [pages=2] [core=0] [lp=0]\n", + cmd_sof_ppl_create, 2, 4), + SHELL_CMD_ARG(ppl_delete, NULL, + "Delete IPC4 pipeline: \n", + cmd_sof_ppl_delete, 2, 0), + SHELL_CMD_ARG(ppl_state, NULL, + "Set IPC4 pipeline state: \n", + cmd_sof_ppl_state, 3, 0), + SHELL_CMD_ARG(mod_init, NULL, + "Instantiate module: [core=0] [dp=0]\n", + cmd_sof_mod_init, 4, 2), + SHELL_CMD_ARG(mod_delete, NULL, + "Delete module instance: \n", + cmd_sof_mod_delete, 3, 0), + SHELL_CMD_ARG(mod_bind, NULL, + "Bind two module instances: " + " [src_q=0] [dst_q=0]\n", + cmd_sof_mod_bind, 5, 2), + SHELL_CMD_ARG(mod_unbind, NULL, + "Unbind two module instances: " + " [src_q=0] [dst_q=0]\n", + cmd_sof_mod_unbind, 5, 2), +#endif + +#if CONFIG_SOF_SHELL_MMU_DBG +#if CONFIG_MM_DRV_INTEL_ADSP_MTL_TLB + SHELL_CMD(mmu_status, NULL, + "Print Intel ADSP MTL TLB / virtual memory status\n", + cmd_sof_mmu_status), + SHELL_CMD(tlb_dump, NULL, + "Dump all active TLB entries (vaddr/paddr/flags)\n", + cmd_sof_tlb_dump), + SHELL_CMD_ARG(tlb_lookup, NULL, + "Query TLB for a page or range: [end_vaddr]\n", + cmd_sof_tlb_lookup, 2, 1), +#endif +#if CONFIG_XTENSA_MMU + SHELL_CMD(rasid, NULL, + "Decode RASID register: ring 0-3 to ASID mapping\n", + cmd_sof_rasid), + SHELL_CMD_ARG(page_info, NULL, + "Probe DTLB for a page or range: [end_vaddr]\n" + "Reports physical address, ring, ASID, R/W/X permissions" + " and cache mode for each page currently in the DTLB.\n", + 
cmd_sof_page_info, 2, 1), +#endif +#endif + +#if CONFIG_SOF_SHELL_LLEXT_LOAD + SHELL_CMD_ARG(llext_load, NULL, + "Load llext module from host: [lib_id=1]\n" + "Sets up the DMA handshake slot then waits for:\n" + " dd if= of=/sys/kernel/debug/sof/llext_load\\\n" + " bs=$(stat -c%s ) count=1\n" + "on the host. Prints result when DMA and IPC4 load complete.\n", + cmd_sof_llext_load, 2, 1), +#endif + +#if CONFIG_SOF_SHELL_LLEXT_LIST + SHELL_CMD(llext_list, NULL, + "List llext libraries stored in IMR/DRAM.\n" + "For each library shows base address, storage size and per-module\n" + "SRAM mapping state (yes/no), use count and dependency count.\n", + cmd_sof_llext_list), +#endif + +#if CONFIG_SOF_SHELL_LLEXT_PURGE + SHELL_CMD_ARG(llext_purge, NULL, + "Purge llext library from IMR/DRAM: \n" + "Fails with -EBUSY if any module in the library is still\n" + "mapped in SRAM (i.e. a pipeline using it is still active).\n", + cmd_sof_llext_purge, 2, 0), +#endif + + SHELL_CMD(version, NULL, + "Print the current SOF software version\n", + cmd_sof_version), + + SHELL_CMD(vpage_status, NULL, + "Print virtual page allocator status\n", + cmd_sof_vpage_info), + + SHELL_CMD(vregion_status, NULL, + "Print virtual regions status\n", + cmd_sof_vregion_info), + + SHELL_CMD(pipeline_list, NULL, + "List all active audio pipelines\n", + cmd_sof_pipeline_list), + + SHELL_CMD_ARG(ipc_stats, NULL, + "Print IPC RX/TX counters; 'sof ipc_stats reset' clears them\n", + cmd_sof_ipc_stats, 1, 1), + + SHELL_CMD(ipc_last, NULL, + "Print the last received and sent IPC headers\n", + cmd_sof_ipc_last), + +#if CONFIG_SOF_SHELL_BUFFER_INFO + SHELL_CMD(buffer_list, NULL, + "List all audio buffers (id, source/sink, fill, format)\n", + cmd_sof_buffer_list), + SHELL_CMD_ARG(buffer_info, NULL, + "Detailed info for a single buffer: \n", + cmd_sof_buffer_info, 2, 0), +#endif + +#if CONFIG_SOF_SHELL_SCHED_INFO + SHELL_CMD(sched_tasks, NULL, + "List all scheduler tasks (type, core, prio, state)\n", + 
cmd_sof_sched_tasks), + SHELL_CMD(sched_load, NULL, + "Show per-task cycle counters and totals\n", + cmd_sof_sched_load), +#endif + +#if CONFIG_SOF_SHELL_LOG_INFO + SHELL_CMD(log_status, NULL, + "List Zephyr log backends with state and source count\n", + cmd_sof_log_status), +#endif + +#if CONFIG_SOF_SHELL_MTRACE_DUMP + SHELL_CMD(mtrace_dump, NULL, + "Snapshot the mtrace SRAM ring buffer (does not advance host_ptr)\n", + cmd_sof_mtrace_dump), +#endif + +#if CONFIG_SOF_SHELL_MAILBOX_HEX + SHELL_CMD_ARG(mailbox_hex, NULL, + "Hex-dump a mailbox region: [offset] [length]\n", + cmd_sof_mailbox_hex, 1, 3), +#endif + +#if CONFIG_SOF_SHELL_DBGWIN_DUMP + SHELL_CMD_ARG(dbgwin_dump, NULL, + "List ADSP debug-window slots, or hex-dump one: [slot] [length]\n", + cmd_sof_dbgwin_dump, 1, 2), +#endif + +#if CONFIG_SOF_SHELL_PERF_STATUS + SHELL_CMD_ARG(perf_status, NULL, + "Show telemetry perf state and per-core systick;" + " optional [reset|start|stop|pause]\n", + cmd_sof_perf_status, 1, 1), +#endif + +#if CONFIG_SOF_SHELL_DAI_LIST + SHELL_CMD(dai_list, NULL, + "List all registered DAIs (name, type, channels, rate, " + "fifo, hs)\n", + cmd_sof_dai_list), +#endif + +#if CONFIG_SOF_SHELL_DMA_STATUS + SHELL_CMD_ARG(dma_status, NULL, + "List DMA controllers, or per-channel status: " + "[dma_idx] [chan]\n", + cmd_sof_dma_status, 1, 2), +#endif + +#if CONFIG_SOF_SHELL_KCTL_LIST + SHELL_CMD(kctl_list, NULL, + "List components and their decoded module name / kind\n", + cmd_sof_kctl_list), +#endif SHELL_SUBCMD_SET_END );