File size: 1,977 Bytes
8d7ec14 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 |
# https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14855
import torch
from ldm_patched.modules import args_parser
from ldm_patched.modules import model_management
def stream_context():
    """Return the backend's stream context-manager factory.

    Gives ``torch.cuda.stream`` on CUDA, ``torch.xpu.stream`` on Intel XPU,
    and ``None`` when neither stream-capable backend is present.
    """
    if torch.cuda.is_available():
        return torch.cuda.stream
    return torch.xpu.stream if model_management.is_intel_xpu() else None
def get_current_stream():
    """Return the device's current stream, verified by a small probe.

    Looks up the current CUDA (or Intel XPU) stream, schedules a tiny
    host-to-device copy on it, and synchronizes to confirm the stream is
    actually usable.

    Returns:
        The current ``torch.cuda.Stream`` / ``torch.xpu.Stream`` on success,
        or ``None`` when no stream backend is available or the probe fails.
    """
    try:
        if torch.cuda.is_available():
            device = torch.device(torch.cuda.current_device())
            stream = torch.cuda.current_stream(device)
            # Probe: run a trivial transfer on the stream to make sure
            # stream usage works on this driver/device combination.
            with torch.cuda.stream(stream):
                torch.zeros((1, 1)).to(device, torch.float32)
                stream.synchronize()
            return stream
        if model_management.is_intel_xpu():
            device = torch.device("xpu")
            stream = torch.xpu.current_stream(device)
            with torch.xpu.stream(stream):
                torch.zeros((1, 1)).to(device, torch.float32)
                stream.synchronize()
            return stream
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; narrow to Exception while keeping the best-effort
        # "fall back to stream-less operation" behavior.
        return None
    return None  # CPU-only environment: no stream concept.
def get_new_stream():
    """Create and return a fresh device stream, verified by a small probe.

    Allocates a new CUDA (or Intel XPU) stream, schedules a tiny
    host-to-device copy on it, and synchronizes to confirm the stream is
    actually usable.

    Returns:
        A new ``torch.cuda.Stream`` / ``torch.xpu.Stream`` on success, or
        ``None`` when no stream backend is available or the probe fails.
    """
    try:
        if torch.cuda.is_available():
            device = torch.device(torch.cuda.current_device())
            stream = torch.cuda.Stream(device)
            # Probe: run a trivial transfer on the new stream to make sure
            # stream usage works on this driver/device combination.
            with torch.cuda.stream(stream):
                torch.zeros((1, 1)).to(device, torch.float32)
                stream.synchronize()
            return stream
        if model_management.is_intel_xpu():
            device = torch.device("xpu")
            stream = torch.xpu.Stream(device)
            with torch.xpu.stream(stream):
                torch.zeros((1, 1)).to(device, torch.float32)
                stream.synchronize()
            return stream
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; narrow to Exception while keeping the best-effort
        # "fall back to stream-less operation" behavior.
        return None
    return None  # CPU-only environment: no stream concept.
# Module-level stream state. Streams stay disabled unless the user opted in
# via the --cuda-stream flag AND both streams could actually be created.
current_stream = None  # the stream compute runs on
mover_stream = None    # dedicated stream for weight transfers
using_stream = False

if args_parser.args.cuda_stream:
    current_stream = get_current_stream()
    mover_stream = get_new_stream()
    using_stream = not (current_stream is None or mover_stream is None)
|