import torch
from typing import Any
# _get_device_index has been moved to torch._utils._get_device_index
from torch._utils import _get_device_index as _torch_get_device_index


def _get_device_index(device: Any, optional: bool = False,
                      allow_cpu: bool = False) -> int:
    r"""Gets the device index from :attr:`device`, which can be a torch.device
    object, a Python integer, or ``None``.

    If :attr:`device` is a torch.device object, returns the device index if it
    is a CUDA device. Note that for a CUDA device without a specified index,
    i.e., ``torch.device('cuda')``, this will return the current default CUDA
    device if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
    CPU devices will be accepted and ``-1`` will be returned in this case.

    If :attr:`device` is a Python integer, it is returned as is.

    If :attr:`device` is ``None``, this will return the current default CUDA
    device if :attr:`optional` is ``True``.
    """
    if isinstance(device, str):
        device = torch.device(device)
    if isinstance(device, torch.device):
        if allow_cpu:
            if device.type not in ['cuda', 'cpu']:
                raise ValueError('Expected a cuda or cpu device, but got: {}'.format(device))
        elif device.type != 'cuda':
            raise ValueError('Expected a cuda device, but got: {}'.format(device))
    if not torch.jit.is_scripting():
        if isinstance(device, torch.cuda.device):
            return device.idx
    return _torch_get_device_index(device, optional, allow_cpu)
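
# Usage sketch for _get_device_index (assumes a CUDA-enabled build with at
# least one visible device; expected results shown as trailing comments):
#
#   _get_device_index(torch.device('cuda', 0))              # 0
#   _get_device_index('cuda:0')                             # 0
#   _get_device_index(None, optional=True)                  # index of the current default CUDA device
#   _get_device_index(torch.device('cpu'), allow_cpu=True)  # -1
#   _get_device_index(torch.device('cpu'))                  # raises ValueError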


def _dummy_type(name: str) -> type:
    def init_err(self):
        class_name = self.__class__.__name__
        raise RuntimeError(
            "Tried to instantiate dummy base class {}".format(class_name))
    return type(name, (object,), {"__init__": init_err})
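

# Usage sketch for _dummy_type (the placeholder name below is hypothetical):
# it builds a class whose only behavior is to fail loudly on instantiation,
# which lets a module still expose a named base class when the real
# (typically C-extension) type is unavailable.
#
#   _FakeStreamBase = _dummy_type('_FakeStreamBase')
#   _FakeStreamBase()  # RuntimeError: Tried to instantiate dummy base class _FakeStreamBase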