std/sys/pal/unix/
thread.rs

1use crate::ffi::CStr;
2use crate::mem::{self, ManuallyDrop};
3use crate::num::NonZero;
4#[cfg(all(target_os = "linux", target_env = "gnu"))]
5use crate::sys::weak::dlsym;
6#[cfg(any(target_os = "solaris", target_os = "illumos", target_os = "nto",))]
7use crate::sys::weak::weak;
8use crate::sys::{os, stack_overflow};
9use crate::time::{Duration, Instant};
10use crate::{cmp, io, ptr};
/// Default minimum stack size for spawned threads, in bytes (2 MiB on most Unix targets).
#[cfg(not(any(
    target_os = "l4re",
    target_os = "vxworks",
    target_os = "espidf",
    target_os = "nuttx"
)))]
pub const DEFAULT_MIN_STACK_SIZE: usize = 2 * 1024 * 1024;
/// Default minimum stack size on L4Re (1 MiB).
#[cfg(target_os = "l4re")]
pub const DEFAULT_MIN_STACK_SIZE: usize = 1024 * 1024;
/// Default minimum stack size on VxWorks (256 KiB).
#[cfg(target_os = "vxworks")]
pub const DEFAULT_MIN_STACK_SIZE: usize = 256 * 1024;
/// Default minimum stack size on ESP-IDF/NuttX.
#[cfg(any(target_os = "espidf", target_os = "nuttx"))]
pub const DEFAULT_MIN_STACK_SIZE: usize = 0; // 0 indicates that the stack size configured in the ESP-IDF/NuttX menuconfig system should be used
24
/// Payload handed to a newly spawned thread: its optional name (used by the
/// stack-overflow handler for diagnostics) and the closure to run.
struct ThreadData {
    // Thread name, if one was supplied to `Thread::new`.
    name: Option<Box<str>>,
    // The entry-point closure executed on the new thread.
    f: Box<dyn FnOnce()>,
}
29
/// Owned handle to a spawned OS thread, wrapping the raw `pthread_t`.
/// Dropping the handle detaches the thread (see the `Drop` impl below).
pub struct Thread {
    id: libc::pthread_t,
}

// Some platforms may have pthread_t as a pointer in which case we still want
// a thread to be Send/Sync
unsafe impl Send for Thread {}
unsafe impl Sync for Thread {}
38
impl Thread {
    /// Spawns a new OS thread running `f` with at least `stack` bytes of
    /// stack, returning a handle to it or the OS error from `pthread_create`.
    // unsafe: see thread::Builder::spawn_unchecked for safety requirements
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub unsafe fn new(
        stack: usize,
        name: Option<&str>,
        f: Box<dyn FnOnce()>,
    ) -> io::Result<Thread> {
        // Ownership of the thread data is transferred to the new thread through
        // this raw pointer; it is reclaimed below on every failure path.
        let data = Box::into_raw(Box::new(ThreadData { name: name.map(Box::from), f }));
        let mut native: libc::pthread_t = mem::zeroed();
        let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
        assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);

        #[cfg(any(target_os = "espidf", target_os = "nuttx"))]
        if stack > 0 {
            // Only set the stack if a non-zero value is passed
            // 0 is used as an indication that the default stack size configured in the ESP-IDF/NuttX menuconfig system should be used
            assert_eq!(
                libc::pthread_attr_setstacksize(
                    attr.as_mut_ptr(),
                    cmp::max(stack, min_stack_size(attr.as_ptr()))
                ),
                0
            );
        }

        #[cfg(not(any(target_os = "espidf", target_os = "nuttx")))]
        {
            let stack_size = cmp::max(stack, min_stack_size(attr.as_ptr()));

            match libc::pthread_attr_setstacksize(attr.as_mut_ptr(), stack_size) {
                0 => {}
                n => {
                    assert_eq!(n, libc::EINVAL);
                    // EINVAL means |stack_size| is either too small or not a
                    // multiple of the system page size. Because it's definitely
                    // >= PTHREAD_STACK_MIN, it must be an alignment issue.
                    // Round up to the nearest page and try again.
                    // Note: `-(page_size as isize - 1) as usize - 1` equals
                    // `!(page_size - 1)` for power-of-two page sizes.
                    let page_size = os::page_size();
                    let stack_size =
                        (stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1);

                    // Some libc implementations, e.g. musl, place an upper bound
                    // on the stack size, in which case we can only gracefully return
                    // an error here.
                    if libc::pthread_attr_setstacksize(attr.as_mut_ptr(), stack_size) != 0 {
                        assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);
                        // Reclaim the thread data; the thread was never spawned.
                        drop(Box::from_raw(data));
                        return Err(io::const_error!(
                            io::ErrorKind::InvalidInput,
                            "invalid stack size"
                        ));
                    }
                }
            };
        }

        let ret = libc::pthread_create(&mut native, attr.as_ptr(), thread_start, data as *mut _);
        // Note: if the thread creation fails and this assert fails, then p will
        // be leaked. However, an alternative design could cause double-free
        // which is clearly worse.
        assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);

        return if ret != 0 {
            // The thread failed to start and as a result p was not consumed. Therefore, it is
            // safe to reconstruct the box so that it gets deallocated.
            drop(Box::from_raw(data));
            Err(io::Error::from_raw_os_error(ret))
        } else {
            Ok(Thread { id: native })
        };

        // C-ABI trampoline passed to `pthread_create`; reconstitutes the boxed
        // `ThreadData` and runs the user closure on the new thread.
        extern "C" fn thread_start(data: *mut libc::c_void) -> *mut libc::c_void {
            unsafe {
                let data = Box::from_raw(data as *mut ThreadData);
                // Next, set up our stack overflow handler which may get triggered if we run
                // out of stack.
                let _handler = stack_overflow::Handler::new(data.name);
                // Finally, let's run some code.
                (data.f)();
            }
            ptr::null_mut()
        }
    }

    /// Cooperatively yields the rest of the current timeslice back to the OS
    /// scheduler.
    pub fn yield_now() {
        let ret = unsafe { libc::sched_yield() };
        debug_assert_eq!(ret, 0);
    }

    /// Sets the current thread's name via `prctl(PR_SET_NAME)`.
    #[cfg(target_os = "android")]
    pub fn set_name(name: &CStr) {
        // Not exposed by Android's libc headers, hence defined locally.
        const PR_SET_NAME: libc::c_int = 15;
        unsafe {
            let res = libc::prctl(
                PR_SET_NAME,
                name.as_ptr(),
                0 as libc::c_ulong,
                0 as libc::c_ulong,
                0 as libc::c_ulong,
            );
            // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
            debug_assert_eq!(res, 0);
        }
    }

    /// Sets the current thread's name via `pthread_setname_np`, truncating it
    /// first on platforms that enforce a length limit.
    #[cfg(any(
        target_os = "linux",
        target_os = "freebsd",
        target_os = "dragonfly",
        target_os = "nuttx",
        target_os = "cygwin"
    ))]
    pub fn set_name(name: &CStr) {
        unsafe {
            cfg_if::cfg_if! {
                if #[cfg(any(target_os = "linux", target_os = "cygwin"))] {
                    // Linux and Cygwin limit the allowed length of the name.
                    const TASK_COMM_LEN: usize = 16;
                    let name = truncate_cstr::<{ TASK_COMM_LEN }>(name);
                } else {
                    // FreeBSD, DragonFly BSD and NuttX do not enforce length limits.
                }
            };
            // Available since glibc 2.12, musl 1.1.16, and uClibc 1.0.20 for Linux,
            // FreeBSD 12.2 and 13.0, and DragonFly BSD 6.0.
            let res = libc::pthread_setname_np(libc::pthread_self(), name.as_ptr());
            // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
            debug_assert_eq!(res, 0);
        }
    }

    /// Sets the current thread's name via OpenBSD's `pthread_set_name_np`.
    #[cfg(target_os = "openbsd")]
    pub fn set_name(name: &CStr) {
        unsafe {
            libc::pthread_set_name_np(libc::pthread_self(), name.as_ptr());
        }
    }

    /// Sets the current thread's name. Apple's `pthread_setname_np` only
    /// operates on the calling thread and caps the name length.
    #[cfg(target_vendor = "apple")]
    pub fn set_name(name: &CStr) {
        unsafe {
            let name = truncate_cstr::<{ libc::MAXTHREADNAMESIZE }>(name);
            let res = libc::pthread_setname_np(name.as_ptr());
            // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
            debug_assert_eq!(res, 0);
        }
    }

    /// Sets the current thread's name. NetBSD's `pthread_setname_np` takes a
    /// printf-style format string plus one argument, hence the `"%s"`.
    #[cfg(target_os = "netbsd")]
    pub fn set_name(name: &CStr) {
        unsafe {
            let res = libc::pthread_setname_np(
                libc::pthread_self(),
                c"%s".as_ptr(),
                name.as_ptr() as *mut libc::c_void,
            );
            debug_assert_eq!(res, 0);
        }
    }

    /// Sets the current thread's name via a weakly-resolved
    /// `pthread_setname_np`; silently does nothing if the symbol is absent.
    #[cfg(any(target_os = "solaris", target_os = "illumos", target_os = "nto"))]
    pub fn set_name(name: &CStr) {
        weak!(
            fn pthread_setname_np(
                thread: libc::pthread_t,
                name: *const libc::c_char,
            ) -> libc::c_int;
        );

        if let Some(f) = pthread_setname_np.get() {
            #[cfg(target_os = "nto")]
            const THREAD_NAME_MAX: usize = libc::_NTO_THREAD_NAME_MAX as usize;
            #[cfg(any(target_os = "solaris", target_os = "illumos"))]
            const THREAD_NAME_MAX: usize = 32;

            let name = truncate_cstr::<{ THREAD_NAME_MAX }>(name);
            let res = unsafe { f(libc::pthread_self(), name.as_ptr()) };
            debug_assert_eq!(res, 0);
        }
    }

    /// Sets the current thread's name via the Zircon `ZX_PROP_NAME` property.
    #[cfg(target_os = "fuchsia")]
    pub fn set_name(name: &CStr) {
        use super::fuchsia::*;
        unsafe {
            zx_object_set_property(
                zx_thread_self(),
                ZX_PROP_NAME,
                name.as_ptr() as *const libc::c_void,
                name.to_bytes().len(),
            );
        }
    }

    /// Sets the current thread's name via Haiku's `rename_thread`.
    #[cfg(target_os = "haiku")]
    pub fn set_name(name: &CStr) {
        unsafe {
            // Passing NULL to find_thread looks up the calling thread.
            let thread_self = libc::find_thread(ptr::null_mut());
            let res = libc::rename_thread(thread_self, name.as_ptr());
            // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
            debug_assert_eq!(res, libc::B_OK);
        }
    }

    /// Sets the current task's name via VxWorks' `taskNameSet`, truncated to
    /// the platform's maximum rename length.
    #[cfg(target_os = "vxworks")]
    pub fn set_name(name: &CStr) {
        let mut name = truncate_cstr::<{ (libc::VX_TASK_RENAME_LENGTH - 1) as usize }>(name);
        let res = unsafe { libc::taskNameSet(libc::taskIdSelf(), name.as_mut_ptr()) };
        debug_assert_eq!(res, libc::OK);
    }

    /// No-op: these platforms provide no supported way to set a thread name.
    #[cfg(any(
        target_env = "newlib",
        target_os = "l4re",
        target_os = "emscripten",
        target_os = "redox",
        target_os = "hurd",
        target_os = "aix",
    ))]
    pub fn set_name(_name: &CStr) {
        // No way to set a thread name on these platforms.
    }

    /// Sleeps for at least `dur`, restarting `nanosleep` after signal
    /// interruptions and looping to cover durations beyond `time_t::MAX`.
    #[cfg(not(target_os = "espidf"))]
    pub fn sleep(dur: Duration) {
        let mut secs = dur.as_secs();
        let mut nsecs = dur.subsec_nanos() as _;

        // If we're awoken with a signal then the return value will be -1 and
        // nanosleep will fill in `ts` with the remaining time.
        unsafe {
            while secs > 0 || nsecs > 0 {
                let mut ts = libc::timespec {
                    // Clamp to time_t::MAX; the outer loop handles the remainder.
                    tv_sec: cmp::min(libc::time_t::MAX as u64, secs) as libc::time_t,
                    tv_nsec: nsecs,
                };
                secs -= ts.tv_sec as u64;
                let ts_ptr = &raw mut ts;
                if libc::nanosleep(ts_ptr, ts_ptr) == -1 {
                    assert_eq!(os::errno(), libc::EINTR);
                    // Add back whatever nanosleep reported as unslept.
                    secs += ts.tv_sec as u64;
                    nsecs = ts.tv_nsec;
                } else {
                    nsecs = 0;
                }
            }
        }
    }

    /// Sleeps for at least `dur` using `usleep` (ESP-IDF lacks `nanosleep`).
    #[cfg(target_os = "espidf")]
    pub fn sleep(dur: Duration) {
        // ESP-IDF does not have `nanosleep`, so we use `usleep` instead.
        // As per the documentation of `usleep`, it is expected to support
        // sleep times as big as at least up to 1 second.
        //
        // ESP-IDF does support almost up to `u32::MAX`, but due to a potential integer overflow in its
        // `usleep` implementation
        // (https://github.com/espressif/esp-idf/blob/d7ca8b94c852052e3bc33292287ef4dd62c9eeb1/components/newlib/time.c#L210),
        // we limit the sleep time to the maximum one that would not cause the underlying `usleep` implementation to overflow
        // (`portTICK_PERIOD_MS` can be anything between 1 to 1000, and is 10 by default).
        const MAX_MICROS: u32 = u32::MAX - 1_000_000 - 1;

        // Add any nanoseconds smaller than a microsecond as an extra microsecond
        // so as to comply with the `std::thread::sleep` contract which mandates
        // implementations to sleep for _at least_ the provided `dur`.
        // We can't overflow `micros` as it is a `u128`, while `Duration` is a pair of
        // (`u64` secs, `u32` nanos), where the nanos are strictly smaller than 1 second
        // (i.e. < 1_000_000_000)
        let mut micros = dur.as_micros() + if dur.subsec_nanos() % 1_000 > 0 { 1 } else { 0 };

        while micros > 0 {
            let st = if micros > MAX_MICROS as u128 { MAX_MICROS } else { micros as u32 };
            unsafe {
                libc::usleep(st);
            }

            micros -= st as u128;
        }
    }

    /// Sleeps until `deadline` using an absolute `clock_nanosleep`, which is
    /// immune to drift from repeated relative sleeps after EINTR.
    // Any unix that has clock_nanosleep
    // If this list changes update the Miri clock_nanosleep shim
    #[cfg(any(
        target_os = "freebsd",
        target_os = "netbsd",
        target_os = "linux",
        target_os = "android",
        target_os = "solaris",
        target_os = "illumos",
        target_os = "dragonfly",
        target_os = "hurd",
        target_os = "fuchsia",
        target_os = "vxworks",
    ))]
    pub fn sleep_until(deadline: Instant) {
        let Some(ts) = deadline.into_inner().into_timespec().to_timespec() else {
            // The deadline is further in the future than can be passed to
            // clock_nanosleep. We have to use Self::sleep instead. This might
            // happen on 32 bit platforms, especially closer to 2038.
            let now = Instant::now();
            if let Some(delay) = deadline.checked_duration_since(now) {
                Self::sleep(delay);
            }
            return;
        };

        unsafe {
            // When we get interrupted (res = EINTR) call clock_nanosleep again
            loop {
                let res = libc::clock_nanosleep(
                    super::time::Instant::CLOCK_ID,
                    libc::TIMER_ABSTIME,
                    &ts,
                    core::ptr::null_mut(), // not required with TIMER_ABSTIME
                );

                if res == 0 {
                    break;
                } else {
                    assert_eq!(
                        res,
                        libc::EINTR,
                        "timespec is in range,
                         clockid is valid and kernel should support it"
                    );
                }
            }
        }
    }

    /// Fallback `sleep_until` for platforms without `clock_nanosleep`:
    /// computes the remaining delay and sleeps relatively.
    // Any unix that does not have clock_nanosleep
    #[cfg(not(any(
        target_os = "freebsd",
        target_os = "netbsd",
        target_os = "linux",
        target_os = "android",
        target_os = "solaris",
        target_os = "illumos",
        target_os = "dragonfly",
        target_os = "hurd",
        target_os = "fuchsia",
        target_os = "vxworks",
    )))]
    pub fn sleep_until(deadline: Instant) {
        let now = Instant::now();
        if let Some(delay) = deadline.checked_duration_since(now) {
            Self::sleep(delay);
        }
    }

    /// Blocks until the thread terminates, consuming the handle (so the
    /// `Drop` impl's detach never runs for a joined thread).
    pub fn join(self) {
        let id = self.into_id();
        let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) };
        assert!(ret == 0, "failed to join thread: {}", io::Error::from_raw_os_error(ret));
    }

    /// Returns the raw pthread handle without consuming the `Thread`.
    pub fn id(&self) -> libc::pthread_t {
        self.id
    }

    /// Consumes the handle and returns the raw pthread id. `ManuallyDrop`
    /// suppresses the `Drop` impl, so the thread is neither joined nor detached.
    pub fn into_id(self) -> libc::pthread_t {
        ManuallyDrop::new(self).id
    }
}
404
405impl Drop for Thread {
406    fn drop(&mut self) {
407        let ret = unsafe { libc::pthread_detach(self.id) };
408        debug_assert_eq!(ret, 0);
409    }
410}
411
/// Returns the OS-assigned numeric ID of the calling thread, or `None` on
/// platforms where no such ID is exposed.
pub(crate) fn current_os_id() -> Option<u64> {
    // Most Unix platforms have a way to query an integer ID of the current thread, all with
    // slightly different spellings.
    //
    // The OS thread ID is used rather than `pthread_self` so as to match what will be displayed
    // for process inspection (debuggers, trace, `top`, etc.).
    cfg_if::cfg_if! {
        // Most platforms have a function returning a `pid_t` or int, which is an `i32`.
        if #[cfg(any(target_os = "android", target_os = "linux"))] {
            use crate::sys::weak::syscall;

            // `libc::gettid` is only available on glibc 2.30+, but the syscall is available
            // since Linux 2.4.11.
            syscall!(fn gettid() -> libc::pid_t;);

            // SAFETY: FFI call with no preconditions.
            let id: libc::pid_t = unsafe { gettid() };
            Some(id as u64)
        } else if #[cfg(target_os = "nto")] {
            // SAFETY: FFI call with no preconditions.
            let id: libc::pid_t = unsafe { libc::gettid() };
            Some(id as u64)
        } else if #[cfg(target_os = "openbsd")] {
            // SAFETY: FFI call with no preconditions.
            let id: libc::pid_t = unsafe { libc::getthrid() };
            Some(id as u64)
        } else if #[cfg(target_os = "freebsd")] {
            // SAFETY: FFI call with no preconditions.
            let id: libc::c_int = unsafe { libc::pthread_getthreadid_np() };
            Some(id as u64)
        } else if #[cfg(target_os = "netbsd")] {
            // SAFETY: FFI call with no preconditions.
            let id: libc::lwpid_t = unsafe { libc::_lwp_self() };
            Some(id as u64)
        } else if #[cfg(any(target_os = "illumos", target_os = "solaris"))] {
            // On Illumos and Solaris, the `pthread_t` is the same as the OS thread ID.
            // SAFETY: FFI call with no preconditions.
            let id: libc::pthread_t = unsafe { libc::pthread_self() };
            Some(id as u64)
        } else if #[cfg(target_vendor = "apple")] {
            // Apple allows querying arbitrary thread IDs, `thread=NULL` queries the current thread.
            let mut id = 0u64;
            // SAFETY: `thread_id` is a valid pointer, no other preconditions.
            let status: libc::c_int = unsafe { libc::pthread_threadid_np(0, &mut id) };
            if status == 0 {
                Some(id)
            } else {
                None
            }
        } else {
            // Other platforms don't have an OS thread ID or don't have a way to access it.
            None
        }
    }
}
467
468#[cfg(any(
469    target_os = "linux",
470    target_os = "nto",
471    target_os = "solaris",
472    target_os = "illumos",
473    target_os = "vxworks",
474    target_os = "cygwin",
475    target_vendor = "apple",
476))]
477fn truncate_cstr<const MAX_WITH_NUL: usize>(cstr: &CStr) -> [libc::c_char; MAX_WITH_NUL] {
478    let mut result = [0; MAX_WITH_NUL];
479    for (src, dst) in cstr.to_bytes().iter().zip(&mut result[..MAX_WITH_NUL - 1]) {
480        *dst = *src as libc::c_char;
481    }
482    result
483}
484
/// Returns an estimate of the number of hardware threads available to the
/// current process, honoring CPU affinity masks and (on Linux/Android)
/// cgroup CPU quotas where the platform exposes them.
pub fn available_parallelism() -> io::Result<NonZero<usize>> {
    cfg_if::cfg_if! {
        if #[cfg(any(
            target_os = "android",
            target_os = "emscripten",
            target_os = "fuchsia",
            target_os = "hurd",
            target_os = "linux",
            target_os = "aix",
            target_vendor = "apple",
            target_os = "cygwin",
        ))] {
            // `quota` is only meaningfully assigned on Linux/Android below;
            // elsewhere it stays usize::MAX (i.e. "no quota").
            #[allow(unused_assignments)]
            #[allow(unused_mut)]
            let mut quota = usize::MAX;

            #[cfg(any(target_os = "android", target_os = "linux"))]
            {
                quota = cgroups::quota().max(1);
                let mut set: libc::cpu_set_t = unsafe { mem::zeroed() };
                unsafe {
                    if libc::sched_getaffinity(0, size_of::<libc::cpu_set_t>(), &mut set) == 0 {
                        let count = libc::CPU_COUNT(&set) as usize;
                        let count = count.min(quota);

                        // According to sched_getaffinity's API it should always be non-zero, but
                        // some old MIPS kernels were buggy and zero-initialized the mask if
                        // none was explicitly set.
                        // In that case we use the sysconf fallback.
                        if let Some(count) = NonZero::new(count) {
                            return Ok(count)
                        }
                    }
                }
            }
            match unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) } {
                -1 => Err(io::Error::last_os_error()),
                0 => Err(io::Error::UNKNOWN_THREAD_COUNT),
                cpus => {
                    let count = cpus as usize;
                    // Cover the unusual situation where we were able to get the quota but not the affinity mask
                    let count = count.min(quota);
                    // SAFETY: `cpus` is positive here, so `count >= 1`.
                    Ok(unsafe { NonZero::new_unchecked(count) })
                }
            }
        } else if #[cfg(any(
                   target_os = "freebsd",
                   target_os = "dragonfly",
                   target_os = "openbsd",
                   target_os = "netbsd",
               ))] {
            use crate::ptr;

            #[cfg(target_os = "freebsd")]
            {
                let mut set: libc::cpuset_t = unsafe { mem::zeroed() };
                unsafe {
                    if libc::cpuset_getaffinity(
                        libc::CPU_LEVEL_WHICH,
                        libc::CPU_WHICH_PID,
                        -1,
                        size_of::<libc::cpuset_t>(),
                        &mut set,
                    ) == 0 {
                        let count = libc::CPU_COUNT(&set) as usize;
                        if count > 0 {
                            return Ok(NonZero::new_unchecked(count));
                        }
                    }
                }
            }

            #[cfg(target_os = "netbsd")]
            {
                unsafe {
                    let set = libc::_cpuset_create();
                    if !set.is_null() {
                        let mut count: usize = 0;
                        if libc::pthread_getaffinity_np(libc::pthread_self(), libc::_cpuset_size(set), set) == 0 {
                            // Count set bits until the cpuset reports an
                            // out-of-range CPU index (-1).
                            for i in 0..libc::cpuid_t::MAX {
                                match libc::_cpuset_isset(i, set) {
                                    -1 => break,
                                    0 => continue,
                                    _ => count = count + 1,
                                }
                            }
                        }
                        libc::_cpuset_destroy(set);
                        if let Some(count) = NonZero::new(count) {
                            return Ok(count);
                        }
                    }
                }
            }

            let mut cpus: libc::c_uint = 0;
            let mut cpus_size = size_of_val(&cpus);

            unsafe {
                cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
            }

            // Fallback approach in case of errors or no hardware threads.
            if cpus < 1 {
                let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
                let res = unsafe {
                    libc::sysctl(
                        mib.as_mut_ptr(),
                        2,
                        (&raw mut cpus) as *mut _,
                        (&raw mut cpus_size) as *mut _,
                        ptr::null_mut(),
                        0,
                    )
                };

                // Handle errors if any.
                if res == -1 {
                    return Err(io::Error::last_os_error());
                } else if cpus == 0 {
                    return Err(io::Error::UNKNOWN_THREAD_COUNT);
                }
            }

            // SAFETY: `cpus` was verified to be non-zero above.
            Ok(unsafe { NonZero::new_unchecked(cpus as usize) })
        } else if #[cfg(target_os = "nto")] {
            unsafe {
                use libc::_syspage_ptr;
                if _syspage_ptr.is_null() {
                    Err(io::const_error!(io::ErrorKind::NotFound, "no syspage available"))
                } else {
                    let cpus = (*_syspage_ptr).num_cpu;
                    NonZero::new(cpus as usize)
                        .ok_or(io::Error::UNKNOWN_THREAD_COUNT)
                }
            }
        } else if #[cfg(any(target_os = "solaris", target_os = "illumos"))] {
            let mut cpus = 0u32;
            if unsafe { libc::pset_info(libc::PS_MYID, core::ptr::null_mut(), &mut cpus, core::ptr::null_mut()) } != 0 {
                return Err(io::Error::UNKNOWN_THREAD_COUNT);
            }
            Ok(unsafe { NonZero::new_unchecked(cpus as usize) })
        } else if #[cfg(target_os = "haiku")] {
            // system_info cpu_count field gets the static data set at boot time with `smp_set_num_cpus`
            // `get_system_info` calls then `smp_get_num_cpus`
            unsafe {
                let mut sinfo: libc::system_info = crate::mem::zeroed();
                let res = libc::get_system_info(&mut sinfo);

                if res != libc::B_OK {
                    return Err(io::Error::UNKNOWN_THREAD_COUNT);
                }

                Ok(NonZero::new_unchecked(sinfo.cpu_count as usize))
            }
        } else if #[cfg(target_os = "vxworks")] {
            // Note: there is also `vxCpuConfiguredGet`, closer to _SC_NPROCESSORS_CONF
            // expectations than the actual cores availability.
            unsafe extern "C" {
                fn vxCpuEnabledGet() -> libc::cpuset_t;
            }

            // SAFETY: `vxCpuEnabledGet` always fetches a mask with at least one bit set
            unsafe{
                let set = vxCpuEnabledGet();
                Ok(NonZero::new_unchecked(set.count_ones() as usize))
            }
        } else {
            // FIXME: implement on Redox, l4re
            Err(io::const_error!(io::ErrorKind::Unsupported, "getting the number of hardware threads is not supported on the target platform"))
        }
    }
}
658
659#[cfg(any(target_os = "android", target_os = "linux"))]
660mod cgroups {
661    //! Currently not covered
662    //! * cgroup v2 in non-standard mountpoints
663    //! * paths containing control characters or spaces, since those would be escaped in procfs
664    //!   output and we don't unescape
665
666    use crate::borrow::Cow;
667    use crate::ffi::OsString;
668    use crate::fs::{File, exists};
669    use crate::io::{BufRead, Read};
670    use crate::os::unix::ffi::OsStringExt;
671    use crate::path::{Path, PathBuf};
672    use crate::str::from_utf8;
673
    /// Which cgroup hierarchy version an entry in /proc/self/cgroup belongs to.
    #[derive(PartialEq)]
    enum Cgroup {
        V1,
        V2,
    }
679
    /// Returns cgroup CPU quota in core-equivalents, rounded down or usize::MAX if the quota cannot
    /// be determined or is not set.
    pub(super) fn quota() -> usize {
        let mut quota = usize::MAX;
        if cfg!(miri) {
            // Attempting to open a file fails under default flags due to isolation.
            // And Miri does not have parallelism anyway.
            return quota;
        }

        let _: Option<()> = try {
            let mut buf = Vec::with_capacity(128);
            // find our place in the cgroup hierarchy
            File::open("/proc/self/cgroup").ok()?.read_to_end(&mut buf).ok()?;
            // Each line is "hierarchy-id:controllers:path"; scan all lines and
            // keep the best match. `return` below exits the fold closure only.
            let (cgroup_path, version) =
                buf.split(|&c| c == b'\n').fold(None, |previous, line| {
                    let mut fields = line.splitn(3, |&c| c == b':');
                    // 2nd field is a list of controllers for v1 or empty for v2
                    let version = match fields.nth(1) {
                        Some(b"") => Cgroup::V2,
                        Some(controllers)
                            if from_utf8(controllers)
                                .is_ok_and(|c| c.split(',').any(|c| c == "cpu")) =>
                        {
                            Cgroup::V1
                        }
                        _ => return previous,
                    };

                    // already-found v1 trumps v2 since it explicitly specifies its controllers
                    if previous.is_some() && version == Cgroup::V2 {
                        return previous;
                    }

                    let path = fields.last()?;
                    // skip leading slash
                    Some((path[1..].to_owned(), version))
                })?;
            let cgroup_path = PathBuf::from(OsString::from_vec(cgroup_path));

            quota = match version {
                Cgroup::V1 => quota_v1(cgroup_path),
                Cgroup::V2 => quota_v2(cgroup_path),
            };
        };

        quota
    }
728
    /// Computes the cgroup v2 CPU quota (limit/period, floored) for the group
    /// at `group_path` under the standard mount, walking up the hierarchy and
    /// taking the most restrictive ancestor. Returns usize::MAX if unlimited
    /// or undeterminable.
    fn quota_v2(group_path: PathBuf) -> usize {
        let mut quota = usize::MAX;

        let mut path = PathBuf::with_capacity(128);
        let mut read_buf = String::with_capacity(20);

        // standard mount location defined in file-hierarchy(7) manpage
        let cgroup_mount = "/sys/fs/cgroup";

        path.push(cgroup_mount);
        path.push(&group_path);

        path.push("cgroup.controllers");

        // skip if we're not looking at cgroup2
        if matches!(exists(&path), Err(_) | Ok(false)) {
            return usize::MAX;
        };

        path.pop();

        let _: Option<()> = try {
            // Walk from the leaf group up towards the mount root; every level
            // can tighten the quota.
            while path.starts_with(cgroup_mount) {
                path.push("cpu.max");

                read_buf.clear();

                if File::open(&path).and_then(|mut f| f.read_to_string(&mut read_buf)).is_ok() {
                    // First line of cpu.max is "<limit> <period>"; limit may be
                    // the literal "max" (unlimited), which fails the parse below.
                    let raw_quota = read_buf.lines().next()?;
                    let mut raw_quota = raw_quota.split(' ');
                    let limit = raw_quota.next()?;
                    let period = raw_quota.next()?;
                    match (limit.parse::<usize>(), period.parse::<usize>()) {
                        (Ok(limit), Ok(period)) if period > 0 => {
                            quota = quota.min(limit / period);
                        }
                        _ => {}
                    }
                }

                path.pop(); // pop filename
                path.pop(); // pop dir
            }
        };

        quota
    }
776
777    fn quota_v1(group_path: PathBuf) -> usize {
778        let mut quota = usize::MAX;
779        let mut path = PathBuf::with_capacity(128);
780        let mut read_buf = String::with_capacity(20);
781
782        // Hardcode commonly used locations mentioned in the cgroups(7) manpage
783        // if that doesn't work scan mountinfo and adjust `group_path` for bind-mounts
784        let mounts: &[fn(&Path) -> Option<(_, &Path)>] = &[
785            |p| Some((Cow::Borrowed("/sys/fs/cgroup/cpu"), p)),
786            |p| Some((Cow::Borrowed("/sys/fs/cgroup/cpu,cpuacct"), p)),
787            // this can be expensive on systems with tons of mountpoints
788            // but we only get to this point when /proc/self/cgroups explicitly indicated
789            // this process belongs to a cpu-controller cgroup v1 and the defaults didn't work
790            find_mountpoint,
791        ];
792
793        for mount in mounts {
794            let Some((mount, group_path)) = mount(&group_path) else { continue };
795
796            path.clear();
797            path.push(mount.as_ref());
798            path.push(&group_path);
799
800            // skip if we guessed the mount incorrectly
801            if matches!(exists(&path), Err(_) | Ok(false)) {
802                continue;
803            }
804
805            while path.starts_with(mount.as_ref()) {
806                let mut parse_file = |name| {
807                    path.push(name);
808                    read_buf.clear();
809
810                    let f = File::open(&path);
811                    path.pop(); // restore buffer before any early returns
812                    f.ok()?.read_to_string(&mut read_buf).ok()?;
813                    let parsed = read_buf.trim().parse::<usize>().ok()?;
814
815                    Some(parsed)
816                };
817
818                let limit = parse_file("cpu.cfs_quota_us");
819                let period = parse_file("cpu.cfs_period_us");
820
821                match (limit, period) {
822                    (Some(limit), Some(period)) if period > 0 => quota = quota.min(limit / period),
823                    _ => {}
824                }
825
826                path.pop();
827            }
828
829            // we passed the try_exists above so we should have traversed the correct hierarchy
830            // when reaching this line
831            break;
832        }
833
834        quota
835    }
836
    /// Scan mountinfo for cgroup v1 mountpoint with a cpu controller
    ///
    /// If the cgroupfs is a bind mount then `group_path` is adjusted to skip
    /// over the already-included prefix
    ///
    /// Returns `None` when mountinfo cannot be read or no matching mount is
    /// found. The mountpoint is returned owned because it is carved out of a
    /// transient, reused line buffer.
    fn find_mountpoint(group_path: &Path) -> Option<(Cow<'static, str>, &Path)> {
        let mut reader = File::open_buffered("/proc/self/mountinfo").ok()?;
        // One reusable buffer; read_line + clear avoids allocating a fresh
        // String per mountinfo entry (unlike `lines()`).
        let mut line = String::with_capacity(256);
        loop {
            line.clear();
            if reader.read_line(&mut line).ok()? == 0 {
                break;
            }

            let line = line.trim();
            let mut items = line.split(' ');

            // Field layout per proc(5) "/proc/pid/mountinfo":
            //   (1) mount ID (2) parent ID (3) major:minor (4) root
            //   (5) mount point ... optional fields ... "-"
            //   fstype source super-options   <- the last three fields
            // We pull the front fields with nth/next and the trailing ones
            // from the back, which tolerates a variable number of optional
            // fields in the middle. The cpu controller shows up in the
            // super options of a cgroupfs mount.
            let sub_path = items.nth(3)?;
            let mount_point = items.next()?;
            let mount_opts = items.next_back()?;
            let filesystem_type = items.nth_back(1)?;

            if filesystem_type != "cgroup" || !mount_opts.split(',').any(|opt| opt == "cpu") {
                // not a cgroup / not a cpu-controller
                continue;
            }

            // mountinfo roots are absolute; strip the leading "/" so the
            // prefix comparison against the relative `group_path` works.
            let sub_path = Path::new(sub_path).strip_prefix("/").ok()?;

            if !group_path.starts_with(sub_path) {
                // this is a bind-mount and the bound subdirectory
                // does not contain the cgroup this process belongs to
                continue;
            }

            // Drop the prefix that the bind mount already provides, so that
            // mount_point joined with the trimmed path names the right cgroup.
            let trimmed_group_path = group_path.strip_prefix(sub_path).ok()?;

            return Some((Cow::Owned(mount_point.to_owned()), trimmed_group_path));
        }

        None
    }
878}
879
880// glibc >= 2.15 has a __pthread_get_minstack() function that returns
881// PTHREAD_STACK_MIN plus bytes needed for thread-local storage.
882// We need that information to avoid blowing up when a small stack
883// is created in an application with big thread-local storage requirements.
884// See #6233 for rationale and details.
885#[cfg(all(target_os = "linux", target_env = "gnu"))]
886unsafe fn min_stack_size(attr: *const libc::pthread_attr_t) -> usize {
887    // We use dlsym to avoid an ELF version dependency on GLIBC_PRIVATE. (#23628)
888    // We shouldn't really be using such an internal symbol, but there's currently
889    // no other way to account for the TLS size.
890    dlsym!(
891        fn __pthread_get_minstack(attr: *const libc::pthread_attr_t) -> libc::size_t;
892    );
893
894    match __pthread_get_minstack.get() {
895        None => libc::PTHREAD_STACK_MIN,
896        Some(f) => unsafe { f(attr) },
897    }
898}
899
// No point in looking up __pthread_get_minstack() on non-glibc platforms.
#[cfg(all(
    not(all(target_os = "linux", target_env = "gnu")),
    not(any(target_os = "netbsd", target_os = "nuttx"))
))]
unsafe fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
    // The libc-advertised minimum is all we can know here; per-thread TLS
    // size accounting (the reason the glibc variant exists) is unavailable.
    libc::PTHREAD_STACK_MIN
}
908
#[cfg(any(target_os = "netbsd", target_os = "nuttx"))]
unsafe fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
    // Query sysconf once and cache the result; the minimum stack size does
    // not change over the lifetime of the process.
    static STACK: crate::sync::OnceLock<usize> = crate::sync::OnceLock::new();

    *STACK.get_or_init(|| {
        let min = unsafe { libc::sysconf(libc::_SC_THREAD_STACK_MIN) };
        // A negative result means the value is unavailable; fall back to a
        // small guess, otherwise the conversion to usize is lossless.
        usize::try_from(min).unwrap_or(2048) // 2048 is just a guess
    })
}