[kernel]add doxygen comments for scheduler. (#10366)

* [kernel]add doxygen comments for scheduler.

* [kernel]fix spell error in comments.
This commit is contained in:
Guorui Li 2025-06-08 15:03:26 +08:00 committed by GitHub
parent 12ac742407
commit f5bff56c5b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 557 additions and 31 deletions

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2006-2024 RT-Thread Development Team * Copyright (c) 2006-2025 RT-Thread Development Team
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -16,6 +16,20 @@
#include <rtthread.h> #include <rtthread.h>
/**
* @brief Initialize thread scheduling context
*
* @param thread The thread to be initialized
* @param tick Initial time slice value for the thread
* @param priority Initial priority of the thread
*
* @details This function performs the following initialization:
* - Sets thread status to INIT
* - For SMP systems:
* * Sets bind CPU to none (RT_CPUS_NR)
* * Marks CPU as detached (RT_CPU_DETACHED)
* - Calls rt_sched_thread_init_priv() for private scheduling data initialization
*/
void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority) void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
{ {
/* setup thread status */ /* setup thread status */
@ -30,6 +44,17 @@ void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uin
rt_sched_thread_init_priv(thread, tick, priority); rt_sched_thread_init_priv(thread, tick, priority);
} }
/**
* @brief Start the thread timer for scheduling
*
* @param thread The thread whose timer needs to be started
*
* @return rt_err_t Always returns RT_EOK on success
*
* @details This function:
* - Requires scheduler lock to be held.
* - Sets the thread's timer flag (sched_flag_ttmr_set) to indicate timer is active
*/
rt_err_t rt_sched_thread_timer_start(struct rt_thread *thread) rt_err_t rt_sched_thread_timer_start(struct rt_thread *thread)
{ {
RT_SCHED_DEBUG_IS_LOCKED; RT_SCHED_DEBUG_IS_LOCKED;
@ -37,6 +62,15 @@ rt_err_t rt_sched_thread_timer_start(struct rt_thread *thread)
return RT_EOK; return RT_EOK;
} }
/**
* @brief Stop the thread timer for scheduling
*
* @param thread The thread whose timer needs to be stopped
*
* @return rt_err_t
* - RT_EOK if timer was successfully stopped or not active
* - Other error codes from rt_timer_stop() if stop operation failed
*/
rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread) rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread)
{ {
rt_err_t error; rt_err_t error;
@ -56,26 +90,71 @@ rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread)
return error; return error;
} }
/**
* @brief Get the current status of a thread
*
* @param thread The thread to get status from
*
* @return rt_uint8_t The thread status masked with RT_THREAD_STAT_MASK
*
* @details This function:
* - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
* - Returns the thread's status field masked with RT_THREAD_STAT_MASK
*/
rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread) rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread)
{ {
RT_SCHED_DEBUG_IS_LOCKED; RT_SCHED_DEBUG_IS_LOCKED;
return RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK; return RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK;
} }
/**
* @brief Get the current priority of a thread
*
* @param thread The thread to get priority from
*
* @return rt_uint8_t The current priority value of the thread
*
* @details This function:
* - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
* - Returns the thread's current priority field from its private scheduling data
*/
rt_uint8_t rt_sched_thread_get_curr_prio(struct rt_thread *thread) rt_uint8_t rt_sched_thread_get_curr_prio(struct rt_thread *thread)
{ {
RT_SCHED_DEBUG_IS_LOCKED; RT_SCHED_DEBUG_IS_LOCKED;
return RT_SCHED_PRIV(thread).current_priority; return RT_SCHED_PRIV(thread).current_priority;
} }
/**
* @brief Get the initial priority of a thread
*
* @param thread The thread to get priority from
*
* @return rt_uint8_t The initial priority value of the thread
*
* @details This function:
* - Returns the thread's initial priority field from its private scheduling data
* - Does not require scheduler lock as it accesses read-only fields
*/
rt_uint8_t rt_sched_thread_get_init_prio(struct rt_thread *thread) rt_uint8_t rt_sched_thread_get_init_prio(struct rt_thread *thread)
{ {
/* read only fields, so lock is unecessary */ /* read only fields, so lock is unnecessary */
return RT_SCHED_PRIV(thread).init_priority; return RT_SCHED_PRIV(thread).init_priority;
} }
/** /**
* @note Caller must hold the scheduler lock * @brief Check if a thread is in suspended state
*
* @param thread The thread to check
*
* @return rt_uint8_t
* - 1 if thread is suspended (matches RT_THREAD_SUSPEND_MASK)
* - 0 otherwise
*
* @details This function:
* - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
* - Checks thread's status field against RT_THREAD_SUSPEND_MASK
*
* @note Caller must hold the scheduler lock before calling this function
*/ */
rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread) rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread)
{ {
@ -83,6 +162,18 @@ rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread)
return (RT_SCHED_CTX(thread).stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK; return (RT_SCHED_CTX(thread).stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK;
} }
/**
* @brief Close a thread by setting its status to CLOSED
*
* @param thread The thread to be closed
* @return rt_err_t Always returns RT_EOK on success
*
* @details This function:
* - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
* - Sets the thread's status to RT_THREAD_CLOSE
*
* @note Must be called with scheduler lock held
*/
rt_err_t rt_sched_thread_close(struct rt_thread *thread) rt_err_t rt_sched_thread_close(struct rt_thread *thread)
{ {
RT_SCHED_DEBUG_IS_LOCKED; RT_SCHED_DEBUG_IS_LOCKED;
@ -90,6 +181,19 @@ rt_err_t rt_sched_thread_close(struct rt_thread *thread)
return RT_EOK; return RT_EOK;
} }
/**
* @brief Yield the current thread's remaining time slice
*
* @param thread The thread to yield
* @return rt_err_t Always returns RT_EOK on success
*
* @details This function:
* - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
* - Resets the thread's remaining tick count to its initial value
* - Sets the thread's status to YIELD state
*
* @note Must be called with scheduler lock held
*/
rt_err_t rt_sched_thread_yield(struct rt_thread *thread) rt_err_t rt_sched_thread_yield(struct rt_thread *thread)
{ {
RT_SCHED_DEBUG_IS_LOCKED; RT_SCHED_DEBUG_IS_LOCKED;
@ -100,6 +204,27 @@ rt_err_t rt_sched_thread_yield(struct rt_thread *thread)
return RT_EOK; return RT_EOK;
} }
/**
* @brief Make a suspended thread ready for scheduling
*
* @param thread The thread to be made ready
*
* @return rt_err_t
* - RT_EOK if operation succeeded
* - -RT_EINVAL if thread is not suspended
* - Other error codes from rt_sched_thread_timer_stop() if timer stop failed
*
* @details This function:
* - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
* - Checks if thread is suspended (returns -RT_EINVAL if not)
* - Stops thread timer if active
* - Removes thread from suspend list
* - Clears wakeup handler (if RT_USING_SMART is defined)
* - Inserts thread into ready queue
*
* @note Must be called with scheduler lock held
 *       May fail due to race conditions with the timeout ISR
*/
rt_err_t rt_sched_thread_ready(struct rt_thread *thread) rt_err_t rt_sched_thread_ready(struct rt_thread *thread)
{ {
rt_err_t error; rt_err_t error;
@ -144,6 +269,24 @@ rt_err_t rt_sched_thread_ready(struct rt_thread *thread)
return error; return error;
} }
/**
* @brief Increase the system tick and update thread's remaining time slice
*
* @param tick The number of ticks to increase
* @return rt_err_t Always returns RT_EOK
*
* @details This function:
* - Gets the current thread
* - Locks the scheduler
* - Decreases the thread's remaining tick count by the specified amount
* - If remaining ticks reach zero:
* * Calls rt_sched_thread_yield() to yield the thread
* * Requests a reschedule with rt_sched_unlock_n_resched()
* - Otherwise simply unlocks the scheduler
*
* @note This function is typically called from timer interrupt context
* It handles both SMP and non-SMP cases
*/
rt_err_t rt_sched_tick_increase(rt_tick_t tick) rt_err_t rt_sched_tick_increase(rt_tick_t tick)
{ {
struct rt_thread *thread; struct rt_thread *thread;
@ -178,7 +321,26 @@ rt_err_t rt_sched_tick_increase(rt_tick_t tick)
} }
/** /**
* @brief Update priority of the target thread * @brief Update thread priority and adjust scheduling attributes
*
* @param thread The thread to update priority for
* @param priority New priority value to set
* @param update_init_prio Flag to determine if initial priority should also be updated
* @return rt_err_t Always returns RT_EOK on success
*
* @details This function:
* - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
* - For ready threads:
* * Removes from ready queue
* * Updates priority values
* * Recalculates priority attributes (number, mask, etc.)
* * Reinserts into ready queue with new priority
* - For non-ready threads:
* * Only updates priority values and attributes
* - Handles both 32-bit and >32-bit priority systems
*
* @note Must be called with scheduler lock held
* Thread status must be valid before calling
*/ */
static rt_err_t _rt_sched_update_priority(struct rt_thread *thread, rt_uint8_t priority, rt_bool_t update_init_prio) static rt_err_t _rt_sched_update_priority(struct rt_thread *thread, rt_uint8_t priority, rt_bool_t update_init_prio)
{ {
@ -249,6 +411,19 @@ rt_err_t rt_sched_thread_reset_priority(struct rt_thread *thread, rt_uint8_t pri
} }
#ifdef RT_USING_OVERFLOW_CHECK #ifdef RT_USING_OVERFLOW_CHECK
/**
* @brief Check thread stack for overflow or near-overflow conditions
*
* @param thread The thread to check stack for
*
* @details This function performs the following checks:
* - For SMART mode without MMU: skips check if SP is in user data section
* - Without hardware stack guard:
* * For upward-growing stacks: checks magic number at top and SP range
* * For downward-growing stacks: checks magic number at bottom and SP range
* * Triggers error and infinite loop on overflow
* - Additional warnings when stack pointer is near boundaries
*/
void rt_scheduler_stack_check(struct rt_thread *thread) void rt_scheduler_stack_check(struct rt_thread *thread)
{ {
RT_ASSERT(thread != RT_NULL); RT_ASSERT(thread != RT_NULL);
@ -306,4 +481,4 @@ void rt_scheduler_stack_check(struct rt_thread *thread)
#endif /* ARCH_CPU_STACK_GROWS_UPWARD */ #endif /* ARCH_CPU_STACK_GROWS_UPWARD */
} }
#endif /* RT_USING_OVERFLOW_CHECK */ #endif /* RT_USING_OVERFLOW_CHECK */

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2006-2023, RT-Thread Development Team * Copyright (c) 2006-2025 RT-Thread Development Team
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -398,7 +398,20 @@ static void _sched_remove_thread_locked(struct rt_thread *thread)
} }
/** /**
* @brief This function will initialize the system scheduler. * @brief Initialize the system scheduler.
*
* @details This function performs the following initialization tasks:
* - Initializes the global scheduler spinlock for multi-core synchronization.
* - Initializes the global priority table for saving ready threads of all priority levels.
* - For each CPU core:
* * Initializes per-CPU priority tables
* * Sets initial CPU state (irq_switch_flag, current_priority, etc.)
* * Initializes per-CPU ready tables (if priority > 32)
* * Initializes per-CPU spinlock (if RT_USING_SMART is defined)
* - Initializes the global ready priority group and tables (if priority > 32) as bitmaps for all priorities.
*
* @note This function must be called before any thread scheduling can occur.
* It prepares the scheduler data structures for multi-core operation.
*/ */
void rt_system_scheduler_init(void) void rt_system_scheduler_init(void)
{ {
@ -447,8 +460,20 @@ void rt_system_scheduler_init(void)
} }
/** /**
* @brief This function will startup the scheduler. It will select one thread * @brief Start the system scheduler and switch to the highest priority thread.
* with the highest priority level, then switch to it. *
* @details This function performs the following operations:
* - Releases legacy CPU lock (if any)
* - Disables interrupts to ensure atomic operation
* - Acquires scheduler lock for thread safety
* - Selects the highest priority thread from ready queue
* - Removes the selected thread from ready queue
* - Assigns current CPU core to the selected thread
* - Performs context switch to the selected thread
*
* @note This function will not return after successful execution.
* It performs the initial thread switch during system startup.
* The scheduler must be initialized before calling this function.
*/ */
void rt_system_scheduler_start(void) void rt_system_scheduler_start(void)
{ {
@ -549,6 +574,14 @@ rt_err_t rt_sched_unlock(rt_sched_lock_level_t level)
return RT_EOK; return RT_EOK;
} }
/**
* @brief Check if the scheduler is currently locked
*
* @return rt_bool_t Returns RT_TRUE if scheduler is locked, RT_FALSE otherwise
*
* @note This function checks the scheduler lock status in a thread-safe manner
* by temporarily disabling interrupts to get consistent state.
*/
rt_bool_t rt_sched_is_locked(void) rt_bool_t rt_sched_is_locked(void)
{ {
rt_bool_t rc; rt_bool_t rc;
@ -662,6 +695,14 @@ static rt_thread_t _prepare_context_switch_locked(int cpu_id,
} }
#ifdef RT_USING_SIGNALS #ifdef RT_USING_SIGNALS
/**
* @brief Preprocess pending signals for a suspended thread
*
* @param current_thread The thread to check for pending signals
*
* @details This function checks if the specified thread is suspended and has pending signals.
* If both conditions are met, it will wake up/resume the thread to process the signals.
*/
static void _sched_thread_preprocess_signal(struct rt_thread *current_thread) static void _sched_thread_preprocess_signal(struct rt_thread *current_thread)
{ {
/* should process signal? */ /* should process signal? */
@ -679,6 +720,20 @@ static void _sched_thread_preprocess_signal(struct rt_thread *current_thread)
} }
} }
/**
* @brief Process pending signals for the current thread
*
* @param current_thread The thread to process signals for
*
* @details This function:
* - Locks the scheduler to ensure thread safety
* - Checks if the thread has pending signals
* - If signals are pending:
* * Clears the pending flag
* * Unlocks the scheduler
* * Calls signal handler to process the signals
* - If no signals pending, simply unlocks the scheduler
*/
static void _sched_thread_process_signal(struct rt_thread *current_thread) static void _sched_thread_process_signal(struct rt_thread *current_thread)
{ {
rt_base_t level; rt_base_t level;
@ -722,6 +777,27 @@ static void _sched_thread_process_signal(struct rt_thread *current_thread)
#define SCHED_THREAD_PROCESS_SIGNAL(curthr) #define SCHED_THREAD_PROCESS_SIGNAL(curthr)
#endif /* RT_USING_SIGNALS */ #endif /* RT_USING_SIGNALS */
/**
* @brief Unlock scheduler and perform rescheduling if needed
*
* @param level The scheduler lock level obtained from rt_sched_lock()
*
* @return rt_err_t
* - RT_EOK: Success
* - -RT_EBUSY: Scheduler not available
* - -RT_ESCHEDISR: Called in interrupt context
* - -RT_ESCHEDLOCKED: Scheduler still locked by others
*
* @details This function:
* - Releases scheduler lock at specified level
* - Checks if rescheduling is needed
* - If needed, finds highest priority thread and switches to it
* - Processes pending signals for current thread
* - Handles various error conditions
*
* @note Must be called in pair with rt_sched_lock()
* May trigger context switch if conditions met
*/
rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level) rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
{ {
struct rt_thread *to_thread; struct rt_thread *to_thread;
@ -807,6 +883,15 @@ rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
* @brief This function will perform one scheduling. It will select one thread * @brief This function will perform one scheduling. It will select one thread
* with the highest priority level in global ready queue or local ready queue, * with the highest priority level in global ready queue or local ready queue,
* then switch to it. * then switch to it.
*
* @details This function performs the following operations:
* - Disables interrupts to enter critical section
* - Gets current CPU and thread context
* - Checks if called from interrupt context
* - Finds highest priority ready thread
* - Performs context switch if needed
* - Processes pending signals
* - Restores interrupt state
*/ */
void rt_schedule(void) void rt_schedule(void)
{ {
@ -890,9 +975,16 @@ void rt_schedule(void)
} }
/** /**
* @brief This function checks whether a scheduling is needed after an IRQ context switching. If yes, * @brief Perform thread scheduling after an interrupt context switch
* it will select one thread with the highest priority level, and then switch *
* to it. * @param context The interrupt context pointer
*
* @details This function handles scheduling when returning from interrupt context:
* - Checks if scheduling is needed (irq_switch_flag set)
* - If needed, finds highest priority ready thread
* - Performs context switch to new thread if available
* - Handles cases where scheduler is locked or still in interrupt context
* - Processes pending signals before scheduling
*/ */
void rt_scheduler_do_irq_switch(void *context) void rt_scheduler_do_irq_switch(void *context)
{ {
@ -1002,8 +1094,20 @@ void rt_sched_remove_thread(struct rt_thread *thread)
RT_SCHED_CTX(thread).stat = RT_THREAD_SUSPEND_UNINTERRUPTIBLE; RT_SCHED_CTX(thread).stat = RT_THREAD_SUSPEND_UNINTERRUPTIBLE;
} }
/* thread status initialization and setting up on startup */ /**
* @brief Initialize thread's scheduling private data
*
* @param thread The thread to be initialized
* @param tick Initial time slice value for the thread
* @param priority Initial priority of the thread
*
* @details This function performs the following initialization:
* - Initializes thread's ready list node
* - Sets initial and current priority (must be < RT_THREAD_PRIORITY_MAX)
* - Initializes priority bitmasks (handles >32 priorities if needed)
* - Sets initial time slice values
* - For SMP systems, initializes critical section nesting counter
*/
void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority) void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
{ {
rt_list_init(&RT_THREAD_LIST_NODE(thread)); rt_list_init(&RT_THREAD_LIST_NODE(thread));
@ -1032,7 +1136,22 @@ void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_ui
} }
/* Normally, there isn't anyone racing with us so this operation is lockless */ /**
* @brief Initialize thread scheduling attributes for startup
*
* @param thread The thread to be initialized
*
* @details This function:
* - For systems with >32 priorities:
* * Sets priority number (5 bits)
* * Initializes number_mask and high_mask (3 bits)
* - For systems with <=32 priorities:
* * Initializes number_mask
* - Sets thread state to SUSPEND
*
* @note This is a lockless operation as it's called during thread creation
* when no concurrent access is possible
*/
void rt_sched_thread_startup(struct rt_thread *thread) void rt_sched_thread_startup(struct rt_thread *thread)
{ {
#if RT_THREAD_PRIORITY_MAX > 32 #if RT_THREAD_PRIORITY_MAX > 32
@ -1048,9 +1167,19 @@ void rt_sched_thread_startup(struct rt_thread *thread)
} }
/** /**
* @brief Update scheduling status of thread. this operation is taken as an * @brief Update thread scheduling status after context switch
* atomic operation of the update of SP. Since the local irq is disabled, *
* it's okay to assume that the stack will not be modified meanwhile. * @param thread The thread that will be running after the context switch
*
* @details This function performs critical post-context-switch operations:
* - Verifies interrupts are disabled (RT_ASSERT)
* - Releases scheduler lock from previous thread if exists
* - Updates current thread pointer for the CPU
* - Must be called with interrupts disabled
*
* @note this operation is taken as an atomic operation of the update of SP.
* Since the local irq is disabled, it's okay to assume that the stack
* will not be modified meanwhile.
*/ */
void rt_sched_post_ctx_switch(struct rt_thread *thread) void rt_sched_post_ctx_switch(struct rt_thread *thread)
{ {
@ -1075,6 +1204,21 @@ void rt_sched_post_ctx_switch(struct rt_thread *thread)
static volatile int _critical_error_occurred = 0; static volatile int _critical_error_occurred = 0;
/**
* @brief Safely exit critical section with level checking
*
* @param critical_level The expected critical level to match
*
* @details This function provides a safe way to exit critical sections by:
* - Verifying the current critical level matches the expected level
* - If mismatch detected (debug build only):
* * Prints error message with current and expected levels
* * Triggers backtrace for debugging
* * Enters infinite loop to halt execution
* - Always calls rt_exit_critical() to ensure critical section is exited
*
* @note This is primarily used for debugging critical section mismatches.
*/
void rt_exit_critical_safe(rt_base_t critical_level) void rt_exit_critical_safe(rt_base_t critical_level)
{ {
struct rt_cpu *pcpu = rt_cpu_self(); struct rt_cpu *pcpu = rt_cpu_self();
@ -1100,6 +1244,14 @@ void rt_exit_critical_safe(rt_base_t critical_level)
#else /* !RT_DEBUGING_CRITICAL */ #else /* !RT_DEBUGING_CRITICAL */
/**
* @brief Safely exit critical section (non-debug version)
*
* @param critical_level The expected critical level (unused in non-debug build)
*
* @details This is the non-debug version that simply calls rt_exit_critical().
* The critical_level parameter is ignored in this implementation.
*/
void rt_exit_critical_safe(rt_base_t critical_level) void rt_exit_critical_safe(rt_base_t critical_level)
{ {
RT_UNUSED(critical_level); RT_UNUSED(critical_level);
@ -1122,7 +1274,20 @@ RTM_EXPORT(rt_exit_critical_safe);
#endif /* ARCH_USING_HW_THREAD_SELF */ #endif /* ARCH_USING_HW_THREAD_SELF */
/** /**
* @brief This function will lock the thread scheduler. * @brief Enter a critical section and lock the scheduler
*
* @return The critical level after entering (nesting count)
* Returns -RT_EINVAL if scheduler is unavailable
*
* @details This function:
* - Disables interrupts to ensure atomic operation
* - Gets current thread context
* - Increments critical section nesting counter
* - Returns the new nesting level
* - Handles both hardware-assisted and software thread context cases
*
* @note Must be paired with rt_exit_critical()
* Can be nested, each call must have matching exit call
*/ */
rt_base_t rt_enter_critical(void) rt_base_t rt_enter_critical(void)
{ {
@ -1162,7 +1327,21 @@ rt_base_t rt_enter_critical(void)
RTM_EXPORT(rt_enter_critical); RTM_EXPORT(rt_enter_critical);
/** /**
* @brief This function will unlock the thread scheduler. * @brief Exit a critical section and unlock the scheduler
*
* @details This function performs the following operations:
* - Gets current thread context (using hardware-assisted or software method)
* - Decrements critical section nesting counter
* - If nesting level reaches 0:
* * Checks if rescheduling is needed
* * Clears critical switch flag
* * Performs rescheduling if needed
* - Verifies critical section nesting is valid (debug assertion)
* - Restores interrupt state
*
* @note Must be called in pair with rt_enter_critical()
* Handles both hardware-assisted and software thread context cases
* May trigger rescheduling if conditions met
*/ */
void rt_exit_critical(void) void rt_exit_critical(void)
{ {
@ -1245,6 +1424,30 @@ rt_uint16_t rt_critical_level(void)
} }
RTM_EXPORT(rt_critical_level); RTM_EXPORT(rt_critical_level);
/**
* @brief Bind a thread to a specific CPU core
*
* @param thread The thread to be bound
* @param cpu The target CPU core index (RT_CPUS_NR for no binding)
*
* @return rt_err_t
* - RT_EOK: Operation successful
*
* @details This function handles thread-CPU binding with the following logic:
* - If thread is READY:
* * Removes from current ready queue
* * Updates bind CPU information
* * Inserts to new ready queue
* * Triggers rescheduling if needed
* - If thread is RUNNING:
* * Updates bind CPU information
* * Sends IPI to target CPU if binding changed
* * Triggers rescheduling if needed
* - For other states, just updates bind CPU info
*
* @note Caller must ensure scheduler is not locked before calling
 *       This function will acquire the scheduler lock internally
*/
rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu) rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu)
{ {
rt_sched_lock_level_t slvl; rt_sched_lock_level_t slvl;
@ -1329,4 +1532,4 @@ rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu)
} }
/**@}*/ /**@}*/
/**@endcond*/ /**@endcond*/

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2006-2023, RT-Thread Development Team * Copyright (c) 2006-2025 RT-Thread Development Team
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -108,6 +108,23 @@ static struct rt_thread* _scheduler_get_highest_priority_thread(rt_ubase_t *high
return highest_priority_thread; return highest_priority_thread;
} }
/**
* @brief Lock the scheduler and save the interrupt level
*
* @param plvl Pointer to store the interrupt level before locking
*
* @return rt_err_t
* - RT_EOK on success
* - -RT_EINVAL if plvl is NULL
*
* @details This function:
* - Disables interrupts to prevent preemption
* - Saves the previous interrupt level in plvl
* - Must be paired with rt_sched_unlock() to restore interrupts
*
* @note The lock is implemented by disabling interrupts
* Caller must ensure plvl is valid
*/
rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl) rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl)
{ {
rt_base_t level; rt_base_t level;
@ -120,6 +137,19 @@ rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl)
return RT_EOK; return RT_EOK;
} }
/**
* @brief Unlock the scheduler and restore the interrupt level
*
* @param level The interrupt level to restore (previously saved by rt_sched_lock)
* @return rt_err_t Always returns RT_EOK
*
* @details This function:
* - Restores the interrupt level that was saved when locking the scheduler
* - Must be called to match each rt_sched_lock() call
*
* @note Must be called with the same interrupt level that was saved by rt_sched_lock()
* Should not be called without a corresponding rt_sched_lock() first
*/
rt_err_t rt_sched_unlock(rt_sched_lock_level_t level) rt_err_t rt_sched_unlock(rt_sched_lock_level_t level)
{ {
rt_hw_interrupt_enable(level); rt_hw_interrupt_enable(level);
@ -127,6 +157,17 @@ rt_err_t rt_sched_unlock(rt_sched_lock_level_t level)
return RT_EOK; return RT_EOK;
} }
/**
* @brief Unlock scheduler and trigger a reschedule if needed
*
* @param level The interrupt level to restore (previously saved by rt_sched_lock)
* @return rt_err_t Always returns RT_EOK
*
* @details This function:
* - Restores the interrupt level that was saved when locking the scheduler
* - Triggers a reschedule if the scheduler is available (rt_thread_self() != NULL)
* - Combines the functionality of rt_sched_unlock() and rt_schedule()
*/
rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level) rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
{ {
if (rt_thread_self()) if (rt_thread_self())
@ -140,7 +181,16 @@ rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
} }
/** /**
* @brief This function will initialize the system scheduler. * @brief Initialize the system scheduler for single-core systems
*
* @details This function performs the following initialization tasks:
* - Resets the scheduler lock nest counter to 0
* - Initializes the priority table for all priority levels
* - Clears the ready priority group bitmap
* - For systems with >32 priority levels, initializes the ready table
*
* @note This function must be called before any thread scheduling can occur.
 *       It prepares the scheduler data structures for single-core operation.
*/ */
void rt_system_scheduler_init(void) void rt_system_scheduler_init(void)
{ {
@ -165,8 +215,17 @@ void rt_system_scheduler_init(void)
} }
/** /**
* @brief This function will startup the scheduler. It will select one thread * @brief Start the system scheduler and switch to the highest priority thread
* with the highest priority level, then switch to it. *
* @details This function:
* - Gets the highest priority ready thread using _scheduler_get_highest_priority_thread()
* - Sets it as the current thread for the CPU
* - Removes the thread from ready queue and sets its status to RUNNING
* - Performs a context switch to the selected thread using rt_hw_context_switch_to()
*
* @note This function does not return as it switches to the first thread to run.
* Must be called after rt_system_scheduler_init().
 *       The selected thread will begin execution immediately.
*/ */
void rt_system_scheduler_start(void) void rt_system_scheduler_start(void)
{ {
@ -195,8 +254,18 @@ void rt_system_scheduler_start(void)
/**@{*/ /**@{*/
/** /**
* @brief This function will perform scheduling once. It will select one thread * @brief Perform thread scheduling once. Select the highest priority thread and switch to it.
* with the highest priority, and switch to it immediately. *
* @details This function:
* - Disables interrupts to prevent preemption during scheduling
* - Checks if scheduler is enabled (lock_nest == 0)
* - Gets the highest priority ready thread
* - Determines if current thread should continue running or be preempted
* - Performs context switch if needed:
* * From current thread to new thread (normal case)
* * Handles special cases like interrupt context switches
* - Manages thread states (READY/RUNNING) and priority queues
* - Handles thread yield flags and signal processing
*/ */
void rt_schedule(void) void rt_schedule(void)
{ {
@ -326,7 +395,24 @@ __exit:
return; return;
} }
/* Normally, there isn't anyone racing with us so this operation is lockless */ /**
* @brief Initialize thread scheduling attributes for startup
*
* @param thread The thread to be initialized
*
* @details This function:
* - For systems with >32 priority levels:
* * Sets the thread's priority group number (5 bits)
* * Creates number mask for the priority group
* * Creates high mask for the specific priority (3 bits)
* - For systems with <=32 priority levels:
* * Creates a simple number mask for the priority
* - Sets thread state to SUSPEND to prepare for later activation
*
* @note This function must be called before a thread can be scheduled.
* It prepares the thread's priority-related data structures.
* Normally, there isn't anyone racing with us so this operation is lockless
*/
void rt_sched_thread_startup(struct rt_thread *thread) void rt_sched_thread_startup(struct rt_thread *thread)
{ {
#if RT_THREAD_PRIORITY_MAX > 32 #if RT_THREAD_PRIORITY_MAX > 32
@ -341,6 +427,19 @@ void rt_sched_thread_startup(struct rt_thread *thread)
RT_SCHED_CTX(thread).stat = RT_THREAD_SUSPEND; RT_SCHED_CTX(thread).stat = RT_THREAD_SUSPEND;
} }
/**
* @brief Initialize thread's scheduling private data
*
* @param thread Pointer to the thread control block
* @param tick Initial time slice value for the thread
* @param priority Initial priority of the thread
*
* @details This function:
* - Initializes the thread's list node
* - Sets initial and current priority (must be < RT_THREAD_PRIORITY_MAX)
* - Initializes priority masks (number_mask, number, high_mask for >32 priorities)
* - Sets initial and remaining time slice ticks
*/
void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority) void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
{ {
rt_list_init(&RT_THREAD_LIST_NODE(thread)); rt_list_init(&RT_THREAD_LIST_NODE(thread));
@ -458,6 +557,22 @@ void rt_sched_remove_thread(struct rt_thread *thread)
static volatile int _critical_error_occurred = 0; static volatile int _critical_error_occurred = 0;
/**
* @brief Safely exit critical section with level checking
*
* @param critical_level The expected critical level to match current lock nest
*
* @details This function:
* - Disables interrupts to prevent preemption during check
* - Verifies the provided critical_level matches current rt_scheduler_lock_nest
* - If mismatch detected (debug mode only):
* * Sets error flag
* * Prints debug information including backtrace
* * Enters infinite loop to halt system
* - Always calls rt_exit_critical() to perform actual exit
*
* @note This is a debug version that adds safety checks for critical section exit.
*/
void rt_exit_critical_safe(rt_base_t critical_level) void rt_exit_critical_safe(rt_base_t critical_level)
{ {
rt_base_t level; rt_base_t level;
@ -487,6 +602,14 @@ void rt_exit_critical_safe(rt_base_t critical_level)
#else /* !RT_DEBUGING_CRITICAL */ #else /* !RT_DEBUGING_CRITICAL */
/**
* @brief Safely exit critical section (non-debug version)
*
* @param critical_level The expected critical level (unused in non-debug build)
*
* @details This is the non-debug version that simply calls rt_exit_critical().
* The critical_level parameter is ignored in this implementation.
*/
void rt_exit_critical_safe(rt_base_t critical_level) void rt_exit_critical_safe(rt_base_t critical_level)
{ {
rt_exit_critical(); rt_exit_critical();
@ -496,7 +619,19 @@ void rt_exit_critical_safe(rt_base_t critical_level)
RTM_EXPORT(rt_exit_critical_safe); RTM_EXPORT(rt_exit_critical_safe);
/** /**
* @brief This function will lock the thread scheduler. * @brief Enter critical section and lock the scheduler
*
* @return rt_base_t The current critical level (nesting count)
*
* @details This function:
* - Disables interrupts to prevent preemption
* - Increments the scheduler lock nesting count
* - Returns the new nesting count as critical level
* - Re-enables interrupts while maintaining the lock
*
* @note The nesting count can go up to RT_UINT16_MAX.
* Must be paired with rt_exit_critical().
* Interrupts are only disabled during the lock operation.
*/ */
rt_base_t rt_enter_critical(void) rt_base_t rt_enter_critical(void)
{ {
@ -521,7 +656,20 @@ rt_base_t rt_enter_critical(void)
RTM_EXPORT(rt_enter_critical); RTM_EXPORT(rt_enter_critical);
/** /**
* @brief This function will unlock the thread scheduler. * @brief Exit critical section and unlock scheduler
*
* @details This function:
* - Decrements the scheduler lock nesting count
* - If nesting count reaches zero:
* * Resets the nesting count
* * Re-enables interrupts
* * Triggers a scheduler run if current thread exists
* - If nesting count still positive:
* * Just re-enables interrupts while maintaining lock
*
* @note Must be paired with rt_enter_critical().
* Interrupts are only disabled during the lock operation.
* Scheduling only occurs when fully unlocked (nest=0)
*/ */
void rt_exit_critical(void) void rt_exit_critical(void)
{ {
@ -568,4 +716,4 @@ rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu)
} }
/**@}*/ /**@}*/
/**@endcond*/ /**@endcond*/