vllm.utils.platform_utils

cuda_get_device_properties

cuda_get_device_properties(
    device, names: Sequence[str], init_cuda=False
) -> tuple[Any, ...]

Get specified CUDA device property values without initializing CUDA in the current process.

Source code in vllm/utils/platform_utils.py
def cuda_get_device_properties(
    device, names: Sequence[str], init_cuda=False
) -> tuple[Any, ...]:
    """Get specified CUDA device property values without initializing CUDA in
    the current process."""
    if init_cuda or cuda_is_initialized():
        props = torch.cuda.get_device_properties(device)
        return tuple(getattr(props, name) for name in names)

    # Run in subprocess to avoid initializing CUDA as a side effect.
    mp_ctx = multiprocessing.get_context("fork")
    with ProcessPoolExecutor(max_workers=1, mp_context=mp_ctx) as executor:
        return executor.submit(cuda_get_device_properties, device, names, True).result()
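
A minimal usage sketch (an illustration, not taken from the vLLM docs): the property names are attribute names of the object returned by torch.cuda.get_device_properties, e.g. "name" and "total_memory"; device index 0 is assumed here.

from vllm.utils.platform_utils import cuda_get_device_properties

# Query two properties of GPU 0 without initializing CUDA in this process.
name, total_memory = cuda_get_device_properties(0, ("name", "total_memory"))
print(f"GPU 0: {name}, {total_memory / 2**30:.1f} GiB")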

cuda_is_initialized

cuda_is_initialized() -> bool

Check if CUDA is initialized.

Source code in vllm/utils/platform_utils.py
def cuda_is_initialized() -> bool:
    """Check if CUDA is initialized."""
    if not torch.cuda._is_compiled():
        return False
    return torch.cuda.is_initialized()
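
A hedged example of why this check is useful: forking a process after CUDA has been initialized is unsafe, so a caller might fall back to the "spawn" start method in that case. The policy below is an illustration, not logic taken from this module.

import multiprocessing

from vllm.utils.platform_utils import cuda_is_initialized

# Prefer "fork" only while CUDA is still uninitialized in this process.
start_method = "spawn" if cuda_is_initialized() else "fork"
ctx = multiprocessing.get_context(start_method)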

is_pin_memory_available cached

is_pin_memory_available() -> bool

Check whether pinned (page-locked) host memory is available on the current platform.

Source code in vllm/utils/platform_utils.py
@cache
def is_pin_memory_available() -> bool:
    from vllm.platforms import current_platform

    return current_platform.is_pin_memory_available()
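
A small usage sketch (an illustration, not from the vLLM docs): the result is typically used to decide whether host-side buffers should be allocated as pinned memory, which enables faster, asynchronous host-to-device copies.

import torch

from vllm.utils.platform_utils import is_pin_memory_available

# Allocate a host staging buffer, pinned only when the platform supports it.
buf = torch.empty(1 << 20, dtype=torch.uint8,
                  pin_memory=is_pin_memory_available())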

is_uva_available cached

is_uva_available() -> bool

Check if Unified Virtual Addressing (UVA) is available.

Source code in vllm/utils/platform_utils.py
@cache
def is_uva_available() -> bool:
    """Check if Unified Virtual Addressing (UVA) is available."""
    # UVA requires pinned memory.
    # TODO: Add more requirements for UVA if needed.
    return is_pin_memory_available()
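
A hedged sketch of how this check might gate a zero-copy path: with UVA, pinned host memory is directly addressable from the device, so a caller could keep a buffer on the CPU instead of copying it over. The branch below is illustrative only.

import torch

from vllm.utils.platform_utils import is_uva_available

if is_uva_available():
    # Keep the buffer in pinned host memory; the device can address it directly.
    buf = torch.zeros(1 << 20, dtype=torch.uint8, pin_memory=True)
else:
    # Without UVA, place the buffer on the device and copy into it explicitly.
    buf = torch.zeros(1 << 20, dtype=torch.uint8, device="cuda")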

xpu_is_initialized

xpu_is_initialized() -> bool

Check if XPU is initialized.

Source code in vllm/utils/platform_utils.py
def xpu_is_initialized() -> bool:
    """Check if XPU is initialized."""
    if not torch.xpu._is_compiled():
        return False
    return torch.xpu.is_initialized()
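
A hypothetical convenience wrapper (not part of vllm) that combines the CUDA and XPU checks, for code that only needs to know whether any accelerator runtime has already been initialized in the current process:

from vllm.utils.platform_utils import cuda_is_initialized, xpu_is_initialized

def accelerator_is_initialized() -> bool:
    # True if either the CUDA or the XPU runtime is already initialized.
    return cuda_is_initialized() or xpu_is_initialized()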